2024-11-21 22:01:46,375 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-21 22:01:46,393 main DEBUG Took 0.015692 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-21 22:01:46,394 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-21 22:01:46,395 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-21 22:01:46,396 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-21 22:01:46,398 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,410 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-21 22:01:46,454 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,457 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,458 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,459 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,464 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,465 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,469 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,470 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,471 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,472 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,474 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,474 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,475 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,476 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
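The LoggerConfig builders above are driven by the log4j2.properties bundled in the hbase-logging test jar. As a point of reference only, here is a minimal Java sketch (class name hypothetical) of setting the same per-logger levels programmatically through Log4j 2's Configurator, assuming log4j-core is on the classpath:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class TestLogLevelsSketch {
    public static void main(String[] args) {
        // Mirror the levels reported by the LoggerConfig builders above:
        // quiet noisy dependencies, keep HBase packages at DEBUG.
        Configurator.setRootLevel(Level.INFO);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
    }
}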
2024-11-21 22:01:46,477 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,478 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,479 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,480 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,480 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,481 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,482 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,483 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,484 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 22:01:46,485 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,486 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-21 22:01:46,489 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 22:01:46,493 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-21 22:01:46,512 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-21 22:01:46,513 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
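The createLoggers() entry above lists every named LoggerConfig plus the root logger (level INFO, appender ref Console). A short illustrative sketch (logger name hypothetical) of how a logger call resolves against that hierarchy, using only the standard Log4j 2 API:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public final class LoggerResolutionSketch {
    // Resolves to the nearest configured ancestor LoggerConfig,
    // here "org.apache.hadoop.hbase" at DEBUG.
    private static final Logger LOG = LogManager.getLogger("org.apache.hadoop.hbase.demo");

    public static void main(String[] args) {
        // Forwarded via additivity up to the root logger's Console appender.
        LOG.debug("emitted: effective level is DEBUG");
        // Filtered out: TRACE is below the effective DEBUG level.
        LOG.trace("suppressed");
    }
}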
2024-11-21 22:01:46,515 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-21 22:01:46,515 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-21 22:01:46,527 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-21 22:01:46,531 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-21 22:01:46,534 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-21 22:01:46,534 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-21 22:01:46,534 main DEBUG createAppenders(={Console}) 2024-11-21 22:01:46,535 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized 2024-11-21 22:01:46,536 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-21 22:01:46,536 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK. 2024-11-21 22:01:46,536 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-21 22:01:46,537 main DEBUG OutputStream closed 2024-11-21 22:01:46,537 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-21 22:01:46,537 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-21 22:01:46,537 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK 2024-11-21 22:01:46,707 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-21 22:01:46,711 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-21 22:01:46,716 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-21 22:01:46,718 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-21 22:01:46,719 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-21 22:01:46,720 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-21 22:01:46,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-21 22:01:46,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-21 22:01:46,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-21 22:01:46,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-21 22:01:46,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-21 22:01:46,724 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-21 22:01:46,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-21 22:01:46,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-21 22:01:46,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-21 22:01:46,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-21 22:01:46,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-21 22:01:46,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-21 22:01:46,730 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-21 22:01:46,731 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null 2024-11-21 22:01:46,731 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-21 22:01:46,733 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK. 2024-11-21T22:01:46,759 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-21 22:01:46,764 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-21 22:01:46,764 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-21T22:01:47,470 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894 2024-11-21T22:01:47,471 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-11-21T22:01:47,527 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-21T22:01:47,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T22:01:47,826 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222, deleteOnExit=true 2024-11-21T22:01:47,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T22:01:47,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/test.cache.data in system properties and HBase conf 2024-11-21T22:01:47,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T22:01:47,829 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir in system properties and HBase conf 2024-11-21T22:01:47,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T22:01:47,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T22:01:47,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T22:01:47,952 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T22:01:47,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T22:01:47,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T22:01:47,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T22:01:47,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T22:01:47,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T22:01:47,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T22:01:47,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T22:01:47,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T22:01:47,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T22:01:47,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/nfs.dump.dir in system properties and HBase conf 2024-11-21T22:01:47,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir in system properties and HBase conf 2024-11-21T22:01:47,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T22:01:47,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T22:01:47,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T22:01:49,516 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-21T22:01:49,633 INFO [Time-limited test {}] log.Log(170): Logging initialized @4376ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-21T22:01:49,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:01:49,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:01:49,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:01:49,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:01:49,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T22:01:49,922 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:01:49,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:01:49,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T22:01:50,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a55f3e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-35817-hadoop-hdfs-3_4_1-tests_jar-_-any-2000225120965289757/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T22:01:50,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817} 2024-11-21T22:01:50,198 INFO [Time-limited test {}] server.Server(415): Started @4942ms 2024-11-21T22:01:50,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:01:50,860 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:01:50,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:01:50,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:01:50,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T22:01:50,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134642c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:01:50,879 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@480cc207{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T22:01:50,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1adf9305{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-44267-hadoop-hdfs-3_4_1-tests_jar-_-any-7513598008471386944/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T22:01:50,998 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267} 2024-11-21T22:01:50,998 INFO [Time-limited test {}] server.Server(415): Started @5742ms 2024-11-21T22:01:51,061 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T22:01:51,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:01:51,229 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:01:51,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:01:51,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:01:51,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T22:01:51,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ed86ab3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:01:51,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@246a58e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T22:01:51,387 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19299b82{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-38821-hadoop-hdfs-3_4_1-tests_jar-_-any-14054012289482813661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T22:01:51,388 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821} 2024-11-21T22:01:51,388 INFO [Time-limited test {}] server.Server(415): Started @6133ms 2024-11-21T22:01:51,393 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T22:01:51,478 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:01:51,489 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:01:51,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:01:51,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:01:51,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T22:01:51,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e41eadb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:01:51,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@544a54bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T22:01:51,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d3bb473{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-46809-hadoop-hdfs-3_4_1-tests_jar-_-any-17933545891152490885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T22:01:51,621 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809} 2024-11-21T22:01:51,621 INFO [Time-limited test {}] server.Server(415): Started @6366ms 2024-11-21T22:01:51,623 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
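At this point the three DataNode web endpoints are up for the minicluster requested earlier (StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1, ...}). A hedged sketch of starting a similarly shaped cluster from test code follows; the class and builder method names are taken from the option fields printed in these log messages and are assumed, not verified against this exact 4.0.0-alpha-1-SNAPSHOT build:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // 1 master, 3 region servers, 3 data nodes, 1 ZooKeeper server,
        // matching the option logged by HBaseTestingUtil above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
            // ... exercise the cluster, e.g. via util.getConnection() ...
        } finally {
            util.shutdownMiniCluster();
        }
    }
}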
2024-11-21T22:01:52,713 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,713 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,717 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,723 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,768 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T22:01:52,769 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T22:01:52,825 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5885d9ee5c11fe78 with lease ID 0x2de794335db4fc40: Processing first storage report for DS-74b97c14-8623-4374-84bd-cd03bbdbe206 from datanode DatanodeRegistration(127.0.0.1:35185, datanodeUuid=2ea3eedb-5a78-449a-89b6-9d20737bb165, infoPort=44313, infoSecurePort=0, ipcPort=38107, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:52,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5885d9ee5c11fe78 with lease ID 0x2de794335db4fc40: from storage DS-74b97c14-8623-4374-84bd-cd03bbdbe206 node DatanodeRegistration(127.0.0.1:35185, datanodeUuid=2ea3eedb-5a78-449a-89b6-9d20737bb165, infoPort=44313, infoSecurePort=0, ipcPort=38107, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T22:01:52,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16704527153ebc43 with lease ID 0x2de794335db4fc41: Processing first storage report for DS-7a6873e5-82b6-4604-bf0e-68e143180355 from datanode DatanodeRegistration(127.0.0.1:36723, datanodeUuid=fd312820-d742-438d-9626-f8ad2cae6b0d, infoPort=35833, infoSecurePort=0, ipcPort=36021, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:52,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16704527153ebc43 with lease ID 0x2de794335db4fc41: from storage DS-7a6873e5-82b6-4604-bf0e-68e143180355 node DatanodeRegistration(127.0.0.1:36723, datanodeUuid=fd312820-d742-438d-9626-f8ad2cae6b0d, infoPort=35833, infoSecurePort=0, ipcPort=36021, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T22:01:52,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5885d9ee5c11fe78 with lease ID 0x2de794335db4fc40: Processing first storage report for DS-4c8dab0b-25f3-4e18-80df-cbdac249b180 from datanode DatanodeRegistration(127.0.0.1:35185, datanodeUuid=2ea3eedb-5a78-449a-89b6-9d20737bb165, infoPort=44313, infoSecurePort=0, ipcPort=38107, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:52,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5885d9ee5c11fe78 with lease ID 0x2de794335db4fc40: from storage DS-4c8dab0b-25f3-4e18-80df-cbdac249b180 node DatanodeRegistration(127.0.0.1:35185, datanodeUuid=2ea3eedb-5a78-449a-89b6-9d20737bb165, infoPort=44313, infoSecurePort=0, ipcPort=38107, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T22:01:52,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16704527153ebc43 with lease ID 0x2de794335db4fc41: Processing first storage report for DS-7e5021bd-0d9f-4077-90e4-5a6f6396a75b from datanode DatanodeRegistration(127.0.0.1:36723, datanodeUuid=fd312820-d742-438d-9626-f8ad2cae6b0d, infoPort=35833, infoSecurePort=0, ipcPort=36021, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:52,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x16704527153ebc43 with lease ID 0x2de794335db4fc41: from storage DS-7e5021bd-0d9f-4077-90e4-5a6f6396a75b node DatanodeRegistration(127.0.0.1:36723, datanodeUuid=fd312820-d742-438d-9626-f8ad2cae6b0d, infoPort=35833, infoSecurePort=0, ipcPort=36021, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T22:01:52,941 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,943 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877/current, will proceed with Du for space computation calculation, 2024-11-21T22:01:52,995 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T22:01:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72a8eb0514bc39ca with lease ID 0x2de794335db4fc42: Processing first storage report for DS-544ebff7-bba4-4176-b2a1-6177a252a6f2 from datanode DatanodeRegistration(127.0.0.1:36581, datanodeUuid=e1949d46-2557-4501-97db-f758381d874f, infoPort=44159, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72a8eb0514bc39ca with lease ID 0x2de794335db4fc42: from storage DS-544ebff7-bba4-4176-b2a1-6177a252a6f2 node DatanodeRegistration(127.0.0.1:36581, datanodeUuid=e1949d46-2557-4501-97db-f758381d874f, infoPort=44159, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T22:01:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72a8eb0514bc39ca with lease ID 0x2de794335db4fc42: Processing first storage report for DS-d67f44f0-9daf-49e5-a07b-3263280e38cc from datanode DatanodeRegistration(127.0.0.1:36581, datanodeUuid=e1949d46-2557-4501-97db-f758381d874f, infoPort=44159, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877) 2024-11-21T22:01:53,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72a8eb0514bc39ca with lease ID 0x2de794335db4fc42: from storage DS-d67f44f0-9daf-49e5-a07b-3263280e38cc node DatanodeRegistration(127.0.0.1:36581, datanodeUuid=e1949d46-2557-4501-97db-f758381d874f, infoPort=44159, infoSecurePort=0, ipcPort=39123, storageInfo=lv=-57;cid=testClusterID;nsid=194727888;c=1732226508877), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T22:01:53,015 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894 2024-11-21T22:01:53,117 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/zookeeper_0, clientPort=58709, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T22:01:53,129 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58709 2024-11-21T22:01:53,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:53,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:53,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741825_1001 (size=7) 2024-11-21T22:01:53,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741825_1001 (size=7) 2024-11-21T22:01:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741825_1001 (size=7) 2024-11-21T22:01:53,862 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 with version=8 2024-11-21T22:01:53,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/hbase-staging 2024-11-21T22:01:53,951 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-21T22:01:54,199 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2a884b411602:0 server-side Connection retries=45 2024-11-21T22:01:54,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:54,209 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:54,213 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T22:01:54,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:54,213 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T22:01:54,356 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T22:01:54,428 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-21T22:01:54,441 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-21T22:01:54,446 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T22:01:54,482 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 107538 (auto-detected) 2024-11-21T22:01:54,484 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-21T22:01:54,510 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42183 2024-11-21T22:01:54,542 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42183 connecting to ZooKeeper ensemble=127.0.0.1:58709 2024-11-21T22:01:54,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:421830x0, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T22:01:54,660 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42183-0x10160b48b8a0000 connected 2024-11-21T22:01:54,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:54,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:54,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:01:54,844 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90, hbase.cluster.distributed=false 2024-11-21T22:01:54,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T22:01:54,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42183 2024-11-21T22:01:54,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42183 2024-11-21T22:01:54,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=42183 2024-11-21T22:01:54,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42183 2024-11-21T22:01:54,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42183 2024-11-21T22:01:55,026 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2a884b411602:0 server-side Connection retries=45 2024-11-21T22:01:55,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,028 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T22:01:55,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,029 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T22:01:55,032 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T22:01:55,036 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T22:01:55,037 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41399 2024-11-21T22:01:55,040 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41399 connecting to ZooKeeper ensemble=127.0.0.1:58709 2024-11-21T22:01:55,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413990x0, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T22:01:55,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413990x0, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:01:55,078 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41399-0x10160b48b8a0001 connected 2024-11-21T22:01:55,081 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T22:01:55,090 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, 
evictRemainRatio=0.5 2024-11-21T22:01:55,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T22:01:55,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T22:01:55,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41399 2024-11-21T22:01:55,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41399 2024-11-21T22:01:55,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41399 2024-11-21T22:01:55,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41399 2024-11-21T22:01:55,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41399 2024-11-21T22:01:55,126 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2a884b411602:0 server-side Connection retries=45 2024-11-21T22:01:55,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T22:01:55,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T22:01:55,128 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T22:01:55,128 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T22:01:55,131 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44629 2024-11-21T22:01:55,133 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44629 connecting to ZooKeeper ensemble=127.0.0.1:58709 2024-11-21T22:01:55,134 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,151 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446290x0, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T22:01:55,152 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446290x0, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:01:55,152 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T22:01:55,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44629-0x10160b48b8a0002 connected 2024-11-21T22:01:55,154 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T22:01:55,155 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T22:01:55,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T22:01:55,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44629 2024-11-21T22:01:55,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44629 2024-11-21T22:01:55,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44629 2024-11-21T22:01:55,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44629 2024-11-21T22:01:55,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44629 2024-11-21T22:01:55,184 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2a884b411602:0 server-side Connection retries=45 2024-11-21T22:01:55,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,185 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T22:01:55,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T22:01:55,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T22:01:55,185 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting 
hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T22:01:55,186 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T22:01:55,187 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36419 2024-11-21T22:01:55,189 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36419 connecting to ZooKeeper ensemble=127.0.0.1:58709 2024-11-21T22:01:55,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364190x0, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T22:01:55,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364190x0, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:01:55,204 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36419-0x10160b48b8a0003 connected 2024-11-21T22:01:55,204 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T22:01:55,205 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T22:01:55,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T22:01:55,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T22:01:55,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36419 2024-11-21T22:01:55,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36419 2024-11-21T22:01:55,223 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36419 2024-11-21T22:01:55,223 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36419 2024-11-21T22:01:55,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36419 2024-11-21T22:01:55,248 DEBUG [M:0;2a884b411602:42183 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2a884b411602:42183 2024-11-21T22:01:55,250 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2a884b411602,42183,1732226514042 2024-11-21T22:01:55,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,262 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,264 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2a884b411602,42183,1732226514042 2024-11-21T22:01:55,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T22:01:55,295 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T22:01:55,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T22:01:55,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,296 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,298 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T22:01:55,299 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2a884b411602,42183,1732226514042 from backup master directory 2024-11-21T22:01:55,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,315 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,313 WARN [master/2a884b411602:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T22:01:55,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2a884b411602,42183,1732226514042 2024-11-21T22:01:55,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T22:01:55,316 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2a884b411602,42183,1732226514042 2024-11-21T22:01:55,319 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-21T22:01:55,323 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-21T22:01:55,390 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/hbase.id] with ID: a66a9a66-f6cf-4476-80e0-53486503c165 2024-11-21T22:01:55,390 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.tmp/hbase.id 2024-11-21T22:01:55,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741826_1002 (size=42) 2024-11-21T22:01:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741826_1002 (size=42) 2024-11-21T22:01:55,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741826_1002 (size=42) 2024-11-21T22:01:55,415 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.tmp/hbase.id]:[hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/hbase.id] 2024-11-21T22:01:55,474 INFO [master/2a884b411602:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T22:01:55,480 INFO [master/2a884b411602:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T22:01:55,504 INFO [master/2a884b411602:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 2024-11-21T22:01:55,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,526 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:55,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741827_1003 (size=196) 2024-11-21T22:01:55,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741827_1003 (size=196) 2024-11-21T22:01:55,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741827_1003 (size=196) 2024-11-21T22:01:55,569 INFO [master/2a884b411602:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:01:55,572 INFO [master/2a884b411602:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-21T22:01:55,591 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T22:01:55,595 INFO [master/2a884b411602:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T22:01:55,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741828_1004 (size=1189) 2024-11-21T22:01:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741828_1004 (size=1189) 2024-11-21T22:01:55,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741828_1004 (size=1189) 2024-11-21T22:01:55,668 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/data/master/store 2024-11-21T22:01:55,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741829_1005 (size=34) 2024-11-21T22:01:55,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741829_1005 (size=34) 2024-11-21T22:01:55,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741829_1005 (size=34) 2024-11-21T22:01:55,699 INFO [master/2a884b411602:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-21T22:01:55,703 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:01:55,705 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T22:01:55,705 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T22:01:55,705 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T22:01:55,707 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T22:01:55,708 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T22:01:55,708 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T22:01:55,709 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732226515705Disabling compacts and flushes for region at 1732226515705Disabling writes for close at 1732226515707 (+2 ms)Writing region close event to WAL at 1732226515708 (+1 ms)Closed at 1732226515708 2024-11-21T22:01:55,712 WARN [master/2a884b411602:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/data/master/store/.initializing 2024-11-21T22:01:55,712 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042 2024-11-21T22:01:55,725 INFO [master/2a884b411602:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T22:01:55,746 INFO [master/2a884b411602:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2a884b411602%2C42183%2C1732226514042, suffix=, logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042, archiveDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/oldWALs, maxLogs=10 2024-11-21T22:01:55,775 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752, exclude list is [], retry=0 2024-11-21T22:01:55,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:36723,DS-7a6873e5-82b6-4604-bf0e-68e143180355,DISK] 2024-11-21T22:01:55,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36581,DS-544ebff7-bba4-4176-b2a1-6177a252a6f2,DISK] 2024-11-21T22:01:55,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35185,DS-74b97c14-8623-4374-84bd-cd03bbdbe206,DISK] 2024-11-21T22:01:55,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-21T22:01:55,863 INFO [master/2a884b411602:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 2024-11-21T22:01:55,867 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:44313:44313),(127.0.0.1/127.0.0.1:44159:44159)] 2024-11-21T22:01:55,868 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T22:01:55,868 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:01:55,872 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,873 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T22:01:55,959 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:55,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T22:01:55,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T22:01:55,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:55,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:01:55,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T22:01:55,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:55,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:01:55,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T22:01:55,980 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:55,982 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:01:55,983 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,988 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,990 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,998 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:55,998 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:56,003 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T22:01:56,029 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T22:01:56,053 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:01:56,056 INFO [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68571180, jitterRate=0.02179020643234253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T22:01:56,067 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732226515891Initializing all the Stores at 1732226515894 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226515894Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226515898 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226515899 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226515899Cleaning up temporary data from old regions at 1732226515998 (+99 ms)Region opened successfully at 1732226516067 (+69 ms) 2024-11-21T22:01:56,079 INFO [master/2a884b411602:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T22:01:56,149 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dd9df84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2a884b411602/172.17.0.2:0 2024-11-21T22:01:56,182 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-21T22:01:56,193 INFO [master/2a884b411602:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T22:01:56,193 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T22:01:56,196 INFO [master/2a884b411602:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T22:01:56,197 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-21T22:01:56,203 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-21T22:01:56,203 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T22:01:56,229 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T22:01:56,238 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T22:01:56,252 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T22:01:56,256 INFO [master/2a884b411602:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T22:01:56,257 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T22:01:56,267 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T22:01:56,270 INFO [master/2a884b411602:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T22:01:56,274 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T22:01:56,286 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T22:01:56,287 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T22:01:56,302 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T22:01:56,318 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T22:01:56,327 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T22:01:56,336 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T22:01:56,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,336 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,339 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2a884b411602,42183,1732226514042, sessionid=0x10160b48b8a0000, setting cluster-up flag (Was=false) 2024-11-21T22:01:56,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,369 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-21T22:01:56,419 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T22:01:56,421 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2a884b411602,42183,1732226514042 2024-11-21T22:01:56,452 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:01:56,502 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T22:01:56,505 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2a884b411602,42183,1732226514042 2024-11-21T22:01:56,510 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T22:01:56,534 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(746): ClusterId : a66a9a66-f6cf-4476-80e0-53486503c165 2024-11-21T22:01:56,534 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(746): ClusterId : a66a9a66-f6cf-4476-80e0-53486503c165 2024-11-21T22:01:56,534 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(746): ClusterId : a66a9a66-f6cf-4476-80e0-53486503c165 2024-11-21T22:01:56,537 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T22:01:56,537 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T22:01:56,537 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T22:01:56,542 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-21T22:01:56,546 INFO [master/2a884b411602:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:01:56,547 INFO [master/2a884b411602:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-21T22:01:56,555 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T22:01:56,555 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T22:01:56,555 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T22:01:56,555 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T22:01:56,555 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T22:01:56,555 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T22:01:56,570 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T22:01:56,571 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T22:01:56,571 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T22:01:56,571 DEBUG [RS:2;2a884b411602:36419 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f1a0db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2a884b411602/172.17.0.2:0 2024-11-21T22:01:56,571 DEBUG [RS:1;2a884b411602:44629 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4522213, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2a884b411602/172.17.0.2:0 2024-11-21T22:01:56,573 DEBUG [RS:0;2a884b411602:41399 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@461b789, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2a884b411602/172.17.0.2:0 2024-11-21T22:01:56,601 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2a884b411602:36419 2024-11-21T22:01:56,606 INFO [RS:2;2a884b411602:36419 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T22:01:56,606 INFO [RS:2;2a884b411602:36419 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T22:01:56,607 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T22:01:56,607 INFO [RS:2;2a884b411602:36419 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:01:56,607 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T22:01:56,611 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=36419, startcode=1732226515183 2024-11-21T22:01:56,611 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2a884b411602:44629 2024-11-21T22:01:56,611 INFO [RS:1;2a884b411602:44629 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T22:01:56,611 INFO [RS:1;2a884b411602:44629 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T22:01:56,611 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T22:01:56,613 INFO [RS:1;2a884b411602:44629 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:01:56,613 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T22:01:56,617 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=44629, startcode=1732226515126 2024-11-21T22:01:56,631 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2a884b411602:41399 2024-11-21T22:01:56,631 INFO [RS:0;2a884b411602:41399 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T22:01:56,631 INFO [RS:0;2a884b411602:41399 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T22:01:56,631 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T22:01:56,632 INFO [RS:0;2a884b411602:41399 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:01:56,632 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T22:01:56,639 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=41399, startcode=1732226514979
2024-11-21T22:01:56,665 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-21T22:01:56,670 DEBUG [RS:0;2a884b411602:41399 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-21T22:01:56,670 DEBUG [RS:2;2a884b411602:36419 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-21T22:01:56,672 DEBUG [RS:1;2a884b411602:44629 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-21T22:01:56,690 INFO [master/2a884b411602:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-21T22:01:56,702 INFO [master/2a884b411602:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-21T22:01:56,714 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2a884b411602,42183,1732226514042 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-21T22:01:56,729 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2a884b411602:0, corePoolSize=5, maxPoolSize=5
2024-11-21T22:01:56,730 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2a884b411602:0, corePoolSize=5, maxPoolSize=5
2024-11-21T22:01:56,730 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2a884b411602:0, corePoolSize=5, maxPoolSize=5
2024-11-21T22:01:56,731 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2a884b411602:0, corePoolSize=5, maxPoolSize=5
2024-11-21T22:01:56,731 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2a884b411602:0, corePoolSize=10, maxPoolSize=10
2024-11-21T22:01:56,731 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:56,731 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2a884b411602:0, corePoolSize=2, maxPoolSize=2
2024-11-21T22:01:56,731 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:56,745 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-21T22:01:56,746 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-21T22:01:56,746 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732226546746
2024-11-21T22:01:56,747 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50513, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-21T22:01:56,747 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50421, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-21T22:01:56,748 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-21T22:01:56,749 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-21T22:01:56,751 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-21T22:01:56,753 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T22:01:56,753 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-21T22:01:56,754 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-21T22:01:56,754 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-21T22:01:56,754 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-21T22:01:56,754 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-21T22:01:56,754 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-21T22:01:56,763 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-21T22:01:56,764 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-21T22:01:56,786 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:56,797 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-21T22:01:56,799 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-21T22:01:56,799 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-21T22:01:56,799 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-21T22:01:56,799 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-21T22:01:56,799 WARN [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-21T22:01:56,799 WARN [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-21T22:01:56,799 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-21T22:01:56,801 WARN [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-21T22:01:56,807 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-21T22:01:56,808 INFO [master/2a884b411602:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-21T22:01:56,811 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2a884b411602:0:becomeActiveMaster-HFileCleaner.large.0-1732226516809,5,FailOnTimeoutGroup]
2024-11-21T22:01:56,822 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2a884b411602:0:becomeActiveMaster-HFileCleaner.small.0-1732226516812,5,FailOnTimeoutGroup]
2024-11-21T22:01:56,823 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:56,823 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-21T22:01:56,824 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:56,825 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:56,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741831_1007 (size=1321)
2024-11-21T22:01:56,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741831_1007 (size=1321)
2024-11-21T22:01:56,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741831_1007 (size=1321)
2024-11-21T22:01:56,830 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-21T22:01:56,831 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90
2024-11-21T22:01:56,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741832_1008 (size=32)
2024-11-21T22:01:56,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741832_1008 (size=32)
2024-11-21T22:01:56,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741832_1008 (size=32)
2024-11-21T22:01:56,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T22:01:56,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-21T22:01:56,876 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-21T22:01:56,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T22:01:56,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-21T22:01:56,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-21T22:01:56,882 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-21T22:01:56,882 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T22:01:56,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-21T22:01:56,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-21T22:01:56,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-21T22:01:56,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T22:01:56,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-21T22:01:56,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-21T22:01:56,894 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-21T22:01:56,894 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T22:01:56,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-21T22:01:56,896 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-21T22:01:56,898 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740
2024-11-21T22:01:56,899 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740
2024-11-21T22:01:56,901 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=44629, startcode=1732226515126
2024-11-21T22:01:56,901 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=36419, startcode=1732226515183
2024-11-21T22:01:56,903 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(2659): reportForDuty to master=2a884b411602,42183,1732226514042 with port=41399, startcode=1732226514979
2024-11-21T22:01:56,905 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2a884b411602,41399,1732226514979
2024-11-21T22:01:56,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-21T22:01:56,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-21T22:01:56,908 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(517): Registering regionserver=2a884b411602,41399,1732226514979
2024-11-21T22:01:56,911 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-21T22:01:56,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-21T22:01:56,922 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2a884b411602,36419,1732226515183
2024-11-21T22:01:56,922 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(517): Registering regionserver=2a884b411602,36419,1732226515183
2024-11-21T22:01:56,923 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90
2024-11-21T22:01:56,923 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40237
2024-11-21T22:01:56,923 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-21T22:01:56,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2a884b411602,44629,1732226515126
2024-11-21T22:01:56,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] master.ServerManager(517): Registering regionserver=2a884b411602,44629,1732226515126
2024-11-21T22:01:56,929 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90
2024-11-21T22:01:56,930 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40237
2024-11-21T22:01:56,930 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-21T22:01:56,933 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-21T22:01:56,936 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60130176, jitterRate=-0.10399055480957031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-21T22:01:56,937 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90
2024-11-21T22:01:56,937 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40237
2024-11-21T22:01:56,937 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-21T22:01:56,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732226516868Initializing all the Stores at 1732226516871 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226516871Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226516871Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226516871Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226516872 (+1 ms)Cleaning up temporary data from old regions at 1732226516906 (+34 ms)Region opened successfully at 1732226516940 (+34 ms)
2024-11-21T22:01:56,941 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-21T22:01:56,941 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-21T22:01:56,941 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-21T22:01:56,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms
2024-11-21T22:01:56,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-21T22:01:56,944 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-21T22:01:56,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732226516940Disabling compacts and flushes for region at 1732226516940Disabling writes for close at 1732226516942 (+2 ms)Writing region close event to WAL at 1732226516944 (+2 ms)Closed at 1732226516944
2024-11-21T22:01:56,949 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-21T22:01:56,949 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-21T22:01:56,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-21T22:01:56,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-21T22:01:56,972 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-21T22:01:56,977 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-21T22:01:57,009 DEBUG [RS:0;2a884b411602:41399 {}] zookeeper.ZKUtil(111): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2a884b411602,41399,1732226514979
2024-11-21T22:01:57,010 WARN [RS:0;2a884b411602:41399 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-21T22:01:57,010 DEBUG [RS:2;2a884b411602:36419 {}] zookeeper.ZKUtil(111): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2a884b411602,36419,1732226515183
2024-11-21T22:01:57,010 DEBUG [RS:1;2a884b411602:44629 {}] zookeeper.ZKUtil(111): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2a884b411602,44629,1732226515126
2024-11-21T22:01:57,010 INFO [RS:0;2a884b411602:41399 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-21T22:01:57,010 WARN [RS:1;2a884b411602:44629 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-21T22:01:57,010 WARN [RS:2;2a884b411602:36419 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-21T22:01:57,010 INFO [RS:2;2a884b411602:36419 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-21T22:01:57,010 INFO [RS:1;2a884b411602:44629 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-21T22:01:57,010 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979
2024-11-21T22:01:57,010 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,44629,1732226515126
2024-11-21T22:01:57,010 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,36419,1732226515183
2024-11-21T22:01:57,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2a884b411602,41399,1732226514979]
2024-11-21T22:01:57,011 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2a884b411602,36419,1732226515183]
2024-11-21T22:01:57,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2a884b411602,44629,1732226515126]
2024-11-21T22:01:57,039 INFO [RS:2;2a884b411602:36419 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-21T22:01:57,039 INFO [RS:1;2a884b411602:44629 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-21T22:01:57,039 INFO [RS:0;2a884b411602:41399 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-21T22:01:57,060 INFO [RS:1;2a884b411602:44629 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-21T22:01:57,062 INFO [RS:0;2a884b411602:41399 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-21T22:01:57,067 INFO [RS:2;2a884b411602:36419 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-21T22:01:57,072 INFO [RS:1;2a884b411602:44629 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-21T22:01:57,072 INFO [RS:0;2a884b411602:41399 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-21T22:01:57,072 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,072 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,074 INFO [RS:2;2a884b411602:36419 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-21T22:01:57,074 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,075 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-21T22:01:57,078 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-21T22:01:57,078 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-21T22:01:57,082 INFO [RS:0;2a884b411602:41399 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-21T22:01:57,085 INFO [RS:1;2a884b411602:44629 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-21T22:01:57,082 INFO [RS:2;2a884b411602:36419 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-21T22:01:57,088 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,088 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,088 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,089 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,089 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2a884b411602:0, corePoolSize=2, maxPoolSize=2
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2a884b411602:0, corePoolSize=2, maxPoolSize=2
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,090 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2a884b411602:0, corePoolSize=2, maxPoolSize=2
2024-11-21T22:01:57,091 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:0;2a884b411602:41399 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,091 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,092 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2a884b411602:0, corePoolSize=1, maxPoolSize=1
2024-11-21T22:01:57,092 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,092 DEBUG [RS:2;2a884b411602:36419 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,094 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,095 DEBUG [RS:1;2a884b411602:44629 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2a884b411602:0, corePoolSize=3, maxPoolSize=3
2024-11-21T22:01:57,099 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,100 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,100 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,100 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,100 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,100 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,36419,1732226515183-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,107 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,108 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,108 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,108 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,44629,1732226515126-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-21T22:01:57,108 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,41399,1732226514979-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-21T22:01:57,128 WARN [2a884b411602:42183 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-21T22:01:57,136 INFO [RS:2;2a884b411602:36419 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-21T22:01:57,136 INFO [RS:1;2a884b411602:44629 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-21T22:01:57,138 INFO [RS:0;2a884b411602:41399 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-21T22:01:57,138 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,36419,1732226515183-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,138 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,44629,1732226515126-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,138 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,41399,1732226514979-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,139 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,139 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,139 INFO [RS:2;2a884b411602:36419 {}] regionserver.Replication(171): 2a884b411602,36419,1732226515183 started
2024-11-21T22:01:57,139 INFO [RS:0;2a884b411602:41399 {}] regionserver.Replication(171): 2a884b411602,41399,1732226514979 started
2024-11-21T22:01:57,140 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,140 INFO [RS:1;2a884b411602:44629 {}] regionserver.Replication(171): 2a884b411602,44629,1732226515126 started
2024-11-21T22:01:57,163 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,164 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1482): Serving as 2a884b411602,36419,1732226515183, RpcServer on 2a884b411602/172.17.0.2:36419, sessionid=0x10160b48b8a0003
2024-11-21T22:01:57,165 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-21T22:01:57,165 DEBUG [RS:2;2a884b411602:36419 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2a884b411602,36419,1732226515183
2024-11-21T22:01:57,165 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,165 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,36419,1732226515183'
2024-11-21T22:01:57,165 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1482): Serving as 2a884b411602,41399,1732226514979, RpcServer on 2a884b411602/172.17.0.2:41399, sessionid=0x10160b48b8a0001
2024-11-21T22:01:57,165 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-21T22:01:57,166 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-21T22:01:57,166 DEBUG [RS:0;2a884b411602:41399 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2a884b411602,41399,1732226514979
2024-11-21T22:01:57,166 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,41399,1732226514979'
2024-11-21T22:01:57,166 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-21T22:01:57,167 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-21T22:01:57,168 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-21T22:01:57,168 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-21T22:01:57,168 DEBUG [RS:2;2a884b411602:36419 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2a884b411602,36419,1732226515183
2024-11-21T22:01:57,168 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,36419,1732226515183'
2024-11-21T22:01:57,168 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-21T22:01:57,169 DEBUG [RS:2;2a884b411602:36419 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-21T22:01:57,169 DEBUG [RS:2;2a884b411602:36419 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-21T22:01:57,169 INFO [RS:2;2a884b411602:36419 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-21T22:01:57,170 INFO [RS:2;2a884b411602:36419 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-21T22:01:57,178 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T22:01:57,178 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-21T22:01:57,178 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1482): Serving as 2a884b411602,44629,1732226515126, RpcServer on 2a884b411602/172.17.0.2:44629, sessionid=0x10160b48b8a0002
2024-11-21T22:01:57,179 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-21T22:01:57,179 DEBUG [RS:1;2a884b411602:44629 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2a884b411602,44629,1732226515126
2024-11-21T22:01:57,179 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,44629,1732226515126'
2024-11-21T22:01:57,179 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-21T22:01:57,180 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-21T22:01:57,180 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-21T22:01:57,180 DEBUG [RS:0;2a884b411602:41399 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2a884b411602,41399,1732226514979
2024-11-21T22:01:57,180 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,41399,1732226514979'
2024-11-21T22:01:57,180 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-21T22:01:57,181 DEBUG [RS:0;2a884b411602:41399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-21T22:01:57,181 DEBUG [RS:0;2a884b411602:41399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-21T22:01:57,182 INFO [RS:0;2a884b411602:41399 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-21T22:01:57,182 INFO [RS:0;2a884b411602:41399 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-21T22:01:57,189 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-21T22:01:57,195 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-21T22:01:57,195 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-21T22:01:57,195 DEBUG [RS:1;2a884b411602:44629 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2a884b411602,44629,1732226515126
2024-11-21T22:01:57,195 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2a884b411602,44629,1732226515126'
2024-11-21T22:01:57,195 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-21T22:01:57,196 DEBUG [RS:1;2a884b411602:44629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-21T22:01:57,197 DEBUG [RS:1;2a884b411602:44629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-21T22:01:57,197 INFO [RS:1;2a884b411602:44629 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-21T22:01:57,197 INFO [RS:1;2a884b411602:44629 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-21T22:01:57,274 INFO [RS:2;2a884b411602:36419 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-21T22:01:57,277 INFO [RS:2;2a884b411602:36419 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2a884b411602%2C36419%2C1732226515183, suffix=, logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,36419,1732226515183, archiveDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs, maxLogs=32
2024-11-21T22:01:57,282 INFO [RS:0;2a884b411602:41399 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-21T22:01:57,285 INFO [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2a884b411602%2C41399%2C1732226514979, suffix=, logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979, archiveDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs, maxLogs=32
2024-11-21T22:01:57,292 DEBUG [RS:2;2a884b411602:36419 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,36419,1732226515183/2a884b411602%2C36419%2C1732226515183.1732226517279, exclude list is [], retry=0
2024-11-21T22:01:57,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36723,DS-7a6873e5-82b6-4604-bf0e-68e143180355,DISK]
2024-11-21T22:01:57,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36581,DS-544ebff7-bba4-4176-b2a1-6177a252a6f2,DISK]
2024-11-21T22:01:57,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35185,DS-74b97c14-8623-4374-84bd-cd03bbdbe206,DISK]
2024-11-21T22:01:57,298 INFO [RS:1;2a884b411602:44629 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-21T22:01:57,301 INFO [RS:1;2a884b411602:44629 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2a884b411602%2C44629%2C1732226515126, suffix=, logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,44629,1732226515126, archiveDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs, maxLogs=32
2024-11-21T22:01:57,303 DEBUG [RS:0;2a884b411602:41399 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979/2a884b411602%2C41399%2C1732226514979.1732226517287, exclude list is [], retry=0
2024-11-21T22:01:57,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35185,DS-74b97c14-8623-4374-84bd-cd03bbdbe206,DISK]
2024-11-21T22:01:57,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36723,DS-7a6873e5-82b6-4604-bf0e-68e143180355,DISK]
2024-11-21T22:01:57,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36581,DS-544ebff7-bba4-4176-b2a1-6177a252a6f2,DISK]
2024-11-21T22:01:57,310 INFO [RS:2;2a884b411602:36419 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,36419,1732226515183/2a884b411602%2C36419%2C1732226515183.1732226517279
2024-11-21T22:01:57,312 DEBUG [RS:2;2a884b411602:36419 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44313:44313),(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:44159:44159)]
2024-11-21T22:01:57,326 INFO [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979/2a884b411602%2C41399%2C1732226514979.1732226517287
2024-11-21T22:01:57,327 DEBUG [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:44159:44159),(127.0.0.1/127.0.0.1:44313:44313)]
2024-11-21T22:01:57,332 DEBUG [RS:1;2a884b411602:44629 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,44629,1732226515126/2a884b411602%2C44629%2C1732226515126.1732226517304, exclude list is [], retry=0
2024-11-21T22:01:57,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36723,DS-7a6873e5-82b6-4604-bf0e-68e143180355,DISK]
2024-11-21T22:01:57,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36581,DS-544ebff7-bba4-4176-b2a1-6177a252a6f2,DISK]
2024-11-21T22:01:57,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35185,DS-74b97c14-8623-4374-84bd-cd03bbdbe206,DISK]
2024-11-21T22:01:57,344 INFO [RS:1;2a884b411602:44629 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,44629,1732226515126/2a884b411602%2C44629%2C1732226515126.1732226517304
2024-11-21T22:01:57,351 DEBUG [RS:1;2a884b411602:44629 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:44159:44159),(127.0.0.1/127.0.0.1:44313:44313)]
2024-11-21T22:01:57,381 DEBUG [2a884b411602:42183 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-11-21T22:01:57,388 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0}
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-21T22:01:57,395 INFO [2a884b411602:42183 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-21T22:01:57,395 INFO [2a884b411602:42183 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-21T22:01:57,395 INFO [2a884b411602:42183 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-21T22:01:57,395 DEBUG [2a884b411602:42183 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-21T22:01:57,401 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979
2024-11-21T22:01:57,411 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2a884b411602,41399,1732226514979, state=OPENING
2024-11-21T22:01:57,450 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-21T22:01:57,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T22:01:57,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T22:01:57,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T22:01:57,461 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T22:01:57,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-21T22:01:57,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-21T22:01:57,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-21T22:01:57,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-21T22:01:57,463 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-21T22:01:57,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2a884b411602,41399,1732226514979}]
2024-11-21T22:01:57,642 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-21T22:01:57,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-21T22:01:57,660 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-21T22:01:57,660 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-21T22:01:57,661 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta
2024-11-21T22:01:57,665 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2a884b411602%2C41399%2C1732226514979.meta, suffix=.meta, logDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979, archiveDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs, maxLogs=32
2024-11-21T22:01:57,679 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0
{event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979/2a884b411602%2C41399%2C1732226514979.meta.1732226517666.meta, exclude list is [], retry=0 2024-11-21T22:01:57,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35185,DS-74b97c14-8623-4374-84bd-cd03bbdbe206,DISK] 2024-11-21T22:01:57,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36723,DS-7a6873e5-82b6-4604-bf0e-68e143180355,DISK] 2024-11-21T22:01:57,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36581,DS-544ebff7-bba4-4176-b2a1-6177a252a6f2,DISK] 2024-11-21T22:01:57,686 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/WALs/2a884b411602,41399,1732226514979/2a884b411602%2C41399%2C1732226514979.meta.1732226517666.meta 2024-11-21T22:01:57,686 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44313:44313),(127.0.0.1/127.0.0.1:35833:35833),(127.0.0.1/127.0.0.1:44159:44159)] 2024-11-21T22:01:57,686 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T22:01:57,687 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-21T22:01:57,688 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:01:57,689 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T22:01:57,690 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T22:01:57,691 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
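For context on the AbstractFSWAL lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32): a minimal Java sketch of the configuration keys that typically produce those values in recent HBase releases. The key names and the 0.5 roll multiplier are assumptions to verify against the 4.0.0-alpha-1-SNAPSHOT build under test; only the three reported numbers come from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      // Returns a Configuration carrying WAL sizing that matches the values logged above.
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 0.5 * 256 MB = rollsize 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
        return conf;
      }
    }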
2024-11-21T22:01:57,699 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T22:01:57,699 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:01:57,699 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T22:01:57,699 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T22:01:57,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T22:01:57,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T22:01:57,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:57,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T22:01:57,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T22:01:57,708 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T22:01:57,708 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:57,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T22:01:57,710 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T22:01:57,712 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T22:01:57,712 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:57,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T22:01:57,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T22:01:57,715 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T22:01:57,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:57,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
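The CompactionConfiguration lines above print the effective per-column-family compaction settings for hbase:meta (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). A minimal sketch of the commonly used hbase.hstore.compaction.* keys that feed those values; the key names are assumptions to check against the running version, the numbers are the ones reported above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      // Returns a Configuration with the compaction knobs the log reports per column family.
      public static Configuration compactionTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        return conf;
      }
    }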
2024-11-21T22:01:57,717 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T22:01:57,719 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740 2024-11-21T22:01:57,721 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740 2024-11-21T22:01:57,724 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T22:01:57,724 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T22:01:57,725 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T22:01:57,727 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T22:01:57,729 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72751331, jitterRate=0.084079310297966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T22:01:57,729 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T22:01:57,732 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732226517700Writing region info on filesystem at 1732226517700Initializing all the Stores at 1732226517702 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226517702Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226517703 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226517703Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226517703Cleaning up temporary data from old regions at 1732226517724 (+21 ms)Running coprocessor post-open hooks at 1732226517729 (+5 ms)Region opened successfully at 1732226517732 (+3 ms) 2024-11-21T22:01:57,739 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732226517631 2024-11-21T22:01:57,751 DEBUG [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T22:01:57,751 INFO [RS_OPEN_META-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T22:01:57,753 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:01:57,756 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2a884b411602,41399,1732226514979, state=OPEN 2024-11-21T22:01:57,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T22:01:57,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T22:01:57,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T22:01:57,777 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T22:01:57,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T22:01:57,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T22:01:57,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T22:01:57,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T22:01:57,778 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=2a884b411602,41399,1732226514979 2024-11-21T22:01:57,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T22:01:57,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2a884b411602,41399,1732226514979 in 313 msec 2024-11-21T22:01:57,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T22:01:57,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 828 msec 2024-11-21T22:01:57,794 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T22:01:57,794 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T22:01:57,812 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:01:57,813 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:01:57,840 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:01:57,842 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57763, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:01:57,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3140 sec 2024-11-21T22:01:57,870 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732226517870, completionTime=-1 2024-11-21T22:01:57,873 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-21T22:01:57,874 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
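The registry lines above show the hbase:meta location being fetched ([region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,..., seqNum=-1]). A roughly equivalent client-side lookup, sketched with the public RegionLocator API rather than the internal ConnectionRegistry/ClusterIdFetcher path used in this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws IOException {
        // Expects hbase-site.xml (ZooKeeper quorum, etc.) on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolve the single hbase:meta region, as reported above.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println(loc.getRegion().getRegionNameAsString() + " @ " + loc.getServerName());
        }
      }
    }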
2024-11-21T22:01:57,908 INFO [master/2a884b411602:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-21T22:01:57,908 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732226577908 2024-11-21T22:01:57,909 INFO [master/2a884b411602:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732226637909 2024-11-21T22:01:57,909 INFO [master/2a884b411602:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 35 msec 2024-11-21T22:01:57,911 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:01:57,919 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,919 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,919 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,921 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2a884b411602:42183, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,921 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,922 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T22:01:57,929 DEBUG [master/2a884b411602:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T22:01:57,968 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.652sec 2024-11-21T22:01:57,970 INFO [master/2a884b411602:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T22:01:57,972 INFO [master/2a884b411602:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T22:01:57,974 INFO [master/2a884b411602:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T22:01:57,975 INFO [master/2a884b411602:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
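The ChoreService lines above enable the master's periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, FlushedSequenceIdFlusher). A minimal sketch of the ScheduledChore/ChoreService pattern they refer to; the constructor overload and the millisecond default period unit are assumptions to confirm against the version in use.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // period=60000 ms, analogous to the ClusterStatusChore line above.
        service.scheduleChore(new ScheduledChore("demo-chore", stopper, 60_000) {
          @Override protected void chore() {
            System.out.println("periodic work runs here");
          }
        });
        Thread.sleep(1_000);
        stopper.stop("demo finished");
        service.shutdown();
      }
    }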
2024-11-21T22:01:57,975 INFO [master/2a884b411602:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T22:01:57,976 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T22:01:57,977 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T22:01:58,014 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T22:01:58,014 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 2a884b411602,42183,1732226514042 2024-11-21T22:01:58,018 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@21026223 2024-11-21T22:01:58,020 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T22:01:58,022 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44879, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T22:01:58,082 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T22:01:58,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-21T22:01:58,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:01:58,109 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:58,109 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-21T22:01:58,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:01:58,126 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:01:58,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac404c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:01:58,139 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-21T22:01:58,139 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-21T22:01:58,156 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:01:58,158 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:01:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741837_1013 (size=349) 2024-11-21T22:01:58,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741837_1013 (size=349) 2024-11-21T22:01:58,179 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:01:58,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741837_1013 (size=349) 2024-11-21T22:01:58,186 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:01:58,186 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:01:58,187 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1efd03e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:01:58,187 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:01:58,189 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 807d59511a09f362346016a4b356021c, NAME => 'hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:01:58,190 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:01:58,192 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:01:58,206 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:01:58,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27614ae2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:01:58,210 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:01:58,219 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:01:58,220 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:01:58,222 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2a884b411602,42183,1732226514042 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/test.cache.data in system properties and HBase conf 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir in system properties and HBase conf 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T22:01:58,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T22:01:58,227 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T22:01:58,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/nfs.dump.dir in system properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir in system properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/dfs.provided.aliasmap.inmemory.leveldb.dir in system 
properties and HBase conf 2024-11-21T22:01:58,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T22:01:58,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741838_1014 (size=36) 2024-11-21T22:01:58,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741838_1014 (size=36) 2024-11-21T22:01:58,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741838_1014 (size=36) 2024-11-21T22:01:58,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:01:58,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741839_1015 (size=592039) 2024-11-21T22:01:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741839_1015 (size=592039) 2024-11-21T22:01:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741839_1015 (size=592039) 2024-11-21T22:01:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T22:01:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T22:01:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T22:01:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:01:58,653 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:01:58,654 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 807d59511a09f362346016a4b356021c, disabling compactions & flushes 2024-11-21T22:01:58,654 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:01:58,654 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:01:58,654 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. after waiting 0 ms 2024-11-21T22:01:58,654 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 
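The HBaseTestingUtil lines above redirect the Hadoop/YARN/DFS directories into the test-data directory before bringing up a mini MapReduce cluster next to the already-running minicluster. A minimal test-harness sketch, under the assumption that the widely documented HBaseTestingUtility API (the older name for the HBaseTestingUtil class seen in this log) is available with these methods:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(3);          // 3 region servers, matching the cluster in this log
        util.startMiniMapReduceCluster();  // the "Starting mini mapreduce cluster" step above
        try {
          // MapReduce-over-HBase test code would run here.
        } finally {
          util.shutdownMiniMapReduceCluster();
          util.shutdownMiniCluster();
        }
      }
    }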
2024-11-21T22:01:58,654 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:01:58,654 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 807d59511a09f362346016a4b356021c: Waiting for close lock at 1732226518653Disabling compacts and flushes for region at 1732226518653Disabling writes for close at 1732226518654 (+1 ms)Writing region close event to WAL at 1732226518654Closed at 1732226518654 2024-11-21T22:01:58,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:01:58,665 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732226518658"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226518658"}]},"ts":"1732226518658"} 2024-11-21T22:01:58,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T22:01:58,677 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:01:58,688 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226518677"}]},"ts":"1732226518677"} 2024-11-21T22:01:58,697 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-21T22:01:58,698 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:01:58,703 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:01:58,704 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:01:58,704 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:01:58,704 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:01:58,704 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:01:58,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=807d59511a09f362346016a4b356021c, ASSIGN}] 2024-11-21T22:01:58,718 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=807d59511a09f362346016a4b356021c, ASSIGN 2024-11-21T22:01:58,725 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=807d59511a09f362346016a4b356021c, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:01:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:01:58,878 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-21T22:01:58,879 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=807d59511a09f362346016a4b356021c, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:01:58,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=807d59511a09f362346016a4b356021c, ASSIGN because future has completed 2024-11-21T22:01:58,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 807d59511a09f362346016a4b356021c, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:01:59,061 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T22:01:59,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34621, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T22:01:59,130 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:01:59,131 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 807d59511a09f362346016a4b356021c, NAME => 'hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c.', STARTKEY => '', ENDKEY => ''} 2024-11-21T22:01:59,131 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. service=AccessControlService 2024-11-21T22:01:59,131 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
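The create 'hbase:acl' request logged earlier (a single 'l' family with VERSIONS=1, IN_MEMORY=true, 8 KB block size) is issued internally by the master for the AccessController. A client-side call with the same shape would look roughly like the sketch below; the family settings are copied from the log, while the Admin/TableDescriptorBuilder usage is an assumption about the standard client path, not how the system table is actually created.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAclLikeTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("hbase", "acl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
                  .setMaxVersions(1)      // VERSIONS => '1'
                  .setInMemory(true)      // IN_MEMORY => 'true'
                  .setBlocksize(8 * 1024) // BLOCKSIZE => '8192 B (8KB)'
                  .build())
              .build();
          admin.createTable(desc); // triggers a CreateTableProcedure like pid=4 above
        }
      }
    }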
2024-11-21T22:01:59,132 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,132 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:01:59,132 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,132 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,159 INFO [StoreOpener-807d59511a09f362346016a4b356021c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,166 INFO [StoreOpener-807d59511a09f362346016a4b356021c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 807d59511a09f362346016a4b356021c columnFamilyName l 2024-11-21T22:01:59,166 DEBUG [StoreOpener-807d59511a09f362346016a4b356021c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:01:59,172 INFO [StoreOpener-807d59511a09f362346016a4b356021c-1 {}] regionserver.HStore(327): Store=807d59511a09f362346016a4b356021c/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:01:59,172 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,175 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,176 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,177 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,177 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,182 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,196 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:01:59,198 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 807d59511a09f362346016a4b356021c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71245188, jitterRate=0.06163603067398071}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:01:59,198 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 807d59511a09f362346016a4b356021c 2024-11-21T22:01:59,201 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 807d59511a09f362346016a4b356021c: Running coprocessor pre-open hook at 1732226519132Writing region info on filesystem at 1732226519132Initializing all the Stores at 1732226519156 (+24 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732226519156Cleaning up temporary data from old regions at 1732226519177 (+21 ms)Running coprocessor post-open hooks at 1732226519198 (+21 ms)Region opened successfully at 1732226519201 (+3 ms) 2024-11-21T22:01:59,204 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., pid=6, masterSystemTime=1732226519061 2024-11-21T22:01:59,213 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:01:59,213 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 
2024-11-21T22:01:59,213 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=807d59511a09f362346016a4b356021c, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:01:59,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 807d59511a09f362346016a4b356021c, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:01:59,232 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=2a884b411602,36419,1732226515183, table=hbase:acl, region=807d59511a09f362346016a4b356021c. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T22:01:59,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T22:01:59,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 807d59511a09f362346016a4b356021c, server=2a884b411602,36419,1732226515183 in 355 msec 2024-11-21T22:01:59,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T22:01:59,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=807d59511a09f362346016a4b356021c, ASSIGN in 536 msec 2024-11-21T22:01:59,270 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:01:59,270 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226519270"}]},"ts":"1732226519270"} 2024-11-21T22:01:59,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:01:59,277 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-21T22:01:59,280 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:01:59,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.1810 sec 2024-11-21T22:02:00,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T22:02:00,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-21T22:02:00,295 DEBUG [master/2a884b411602:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T22:02:00,296 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
2024-11-21T22:02:00,297 INFO [master/2a884b411602:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2a884b411602,42183,1732226514042-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T22:02:00,528 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:00,700 WARN [Thread-384 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:00,964 INFO [Thread-384 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:02:00,968 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-21T22:02:00,969 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:02:00,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:02:00,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:02:00,983 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T22:02:00,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:01,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ac57c8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:02:01,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57d25206{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T22:02:01,031 INFO [Thread-384 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:02:01,031 INFO [Thread-384 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:02:01,031 INFO [Thread-384 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T22:02:01,032 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1094c217{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:02:01,033 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@140feb75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T22:02:01,193 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T22:02:01,194 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-21T22:02:01,194 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T22:02:01,196 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T22:02:01,272 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:01,656 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:02,065 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:02,095 INFO [Thread-384 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@c0e50ac{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-42555-hadoop-yarn-common-3_4_1_jar-_-any-8267069594534898418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-21T22:02:02,096 INFO [Thread-384 {}] server.AbstractConnector(333): Started ServerConnector@fd2a6fd{HTTP/1.1, (http/1.1)}{localhost:42555} 2024-11-21T22:02:02,096 INFO [Thread-384 {}] server.Server(415): Started @16841ms 2024-11-21T22:02:02,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67be7839{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-35593-hadoop-yarn-common-3_4_1_jar-_-any-10431419364766054176/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-21T22:02:02,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f77c49c{HTTP/1.1, (http/1.1)}{localhost:35593} 2024-11-21T22:02:02,110 INFO [Time-limited test {}] server.Server(415): Started @16855ms 2024-11-21T22:02:02,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741841_1017 (size=5) 2024-11-21T22:02:02,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741841_1017 (size=5) 2024-11-21T22:02:02,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741841_1017 (size=5) 2024-11-21T22:02:03,217 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:02:03,355 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-21T22:02:03,365 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-21T22:02:03,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:03,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T22:02:03,455 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-11-21T22:02:03,458 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:02:03,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:02:03,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:02:03,464 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T22:02:03,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:03,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8b95114{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:02:03,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3242ffcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T22:02:03,542 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-21T22:02:03,542 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T22:02:03,542 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T22:02:03,542 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T22:02:03,554 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:03,586 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:03,719 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:03,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15f0bbad{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-46865-hadoop-yarn-common-3_4_1_jar-_-any-9156240757607089050/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T22:02:03,732 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@51bef505{HTTP/1.1, (http/1.1)}{localhost:46865} 2024-11-21T22:02:03,732 INFO [Time-limited test {}] server.Server(415): Started @18476ms 2024-11-21T22:02:04,016 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-21T22:02:04,020 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:04,067 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-21T22:02:04,079 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T22:02:04,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T22:02:04,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T22:02:04,110 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T22:02:04,111 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T22:02:04,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@266df67f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,AVAILABLE} 2024-11-21T22:02:04,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45660404{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T22:02:04,177 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-21T22:02:04,177 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T22:02:04,177 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T22:02:04,177 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T22:02:04,192 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:04,199 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:04,342 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T22:02:04,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43a89134{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/java.io.tmpdir/jetty-localhost-44927-hadoop-yarn-common-3_4_1_jar-_-any-9997500799287773256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T22:02:04,351 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c9f43e1{HTTP/1.1, (http/1.1)}{localhost:44927} 2024-11-21T22:02:04,351 INFO [Time-limited test {}] server.Server(415): Started @19095ms 2024-11-21T22:02:04,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-21T22:02:04,389 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:04,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:04,424 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-21T22:02:04,425 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T22:02:04,425 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:04,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-21T22:02:04,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:04,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:04,429 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-21T22:02:04,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-21T22:02:04,429 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase 
MasterObservers 2024-11-21T22:02:04,430 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:04,430 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-21T22:02:04,430 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T22:02:04,430 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T22:02:04,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T22:02:04,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:04,441 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=719, OpenFileDescriptor=779, MaxFileDescriptor=1048576, SystemLoadAverage=336, ProcessCount=11, AvailableMemoryMB=3713 2024-11-21T22:02:04,444 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-11-21T22:02:04,449 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T22:02:04,455 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2a884b411602,42183,1732226514042 2024-11-21T22:02:04,455 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c9263c5 2024-11-21T22:02:04,456 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T22:02:04,462 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T22:02:04,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:02:04,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:04,471 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:02:04,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-21T22:02:04,472 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:04,476 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:02:04,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T22:02:04,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741842_1018 (size=406) 2024-11-21T22:02:04,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741842_1018 (size=406) 2024-11-21T22:02:04,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741842_1018 (size=406) 2024-11-21T22:02:04,524 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 99d725fca8429199afb17d5a8043d99e, NAME => 'testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:04,525 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4521951792b1f21ac39ab04bdc740321, NAME => 'testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:04,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741843_1019 (size=67) 2024-11-21T22:02:04,578 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741844_1020 (size=67) 2024-11-21T22:02:04,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741843_1019 (size=67) 2024-11-21T22:02:04,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741844_1020 (size=67) 2024-11-21T22:02:04,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741844_1020 (size=67) 2024-11-21T22:02:04,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741843_1019 (size=67) 2024-11-21T22:02:04,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T22:02:04,583 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:04,584 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 4521951792b1f21ac39ab04bdc740321, disabling compactions & flushes 2024-11-21T22:02:04,584 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:04,584 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:04,584 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. after waiting 0 ms 2024-11-21T22:02:04,584 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:04,584 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 
2024-11-21T22:02:04,584 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4521951792b1f21ac39ab04bdc740321: Waiting for close lock at 1732226524583Disabling compacts and flushes for region at 1732226524584 (+1 ms)Disabling writes for close at 1732226524584Writing region close event to WAL at 1732226524584Closed at 1732226524584 2024-11-21T22:02:04,589 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:04,589 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 99d725fca8429199afb17d5a8043d99e, disabling compactions & flushes 2024-11-21T22:02:04,589 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:04,589 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:04,590 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. after waiting 0 ms 2024-11-21T22:02:04,590 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:04,590 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:04,590 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 99d725fca8429199afb17d5a8043d99e: Waiting for close lock at 1732226524589Disabling compacts and flushes for region at 1732226524589Disabling writes for close at 1732226524590 (+1 ms)Writing region close event to WAL at 1732226524590Closed at 1732226524590 2024-11-21T22:02:04,593 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:02:04,594 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732226524593"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226524593"}]},"ts":"1732226524593"} 2024-11-21T22:02:04,594 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732226524593"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226524593"}]},"ts":"1732226524593"} 2024-11-21T22:02:04,643 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-21T22:02:04,646 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:02:04,646 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226524646"}]},"ts":"1732226524646"} 2024-11-21T22:02:04,651 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-21T22:02:04,652 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:02:04,654 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:02:04,654 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:02:04,654 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:02:04,654 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:02:04,655 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, ASSIGN}] 2024-11-21T22:02:04,658 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, ASSIGN 2024-11-21T22:02:04,658 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, ASSIGN 2024-11-21T22:02:04,660 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:02:04,660 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:02:04,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T22:02:04,811 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T22:02:04,811 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=99d725fca8429199afb17d5a8043d99e, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:04,811 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=4521951792b1f21ac39ab04bdc740321, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:04,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, ASSIGN because future has completed 2024-11-21T22:02:04,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:02:04,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, ASSIGN because future has completed 2024-11-21T22:02:04,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:04,993 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T22:02:05,017 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39293, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T22:02:05,023 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:05,024 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 99d725fca8429199afb17d5a8043d99e, NAME => 'testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:02:05,024 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 
service=AccessControlService 2024-11-21T22:02:05,024 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:02:05,024 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,025 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:05,025 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,025 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,027 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:05,027 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 4521951792b1f21ac39ab04bdc740321, NAME => 'testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:02:05,027 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. service=AccessControlService 2024-11-21T22:02:05,028 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:02:05,028 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,028 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:05,028 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,028 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,035 INFO [StoreOpener-99d725fca8429199afb17d5a8043d99e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,036 INFO [StoreOpener-4521951792b1f21ac39ab04bdc740321-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,037 INFO [StoreOpener-99d725fca8429199afb17d5a8043d99e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99d725fca8429199afb17d5a8043d99e columnFamilyName cf 2024-11-21T22:02:05,037 DEBUG [StoreOpener-99d725fca8429199afb17d5a8043d99e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:05,038 INFO [StoreOpener-99d725fca8429199afb17d5a8043d99e-1 {}] regionserver.HStore(327): Store=99d725fca8429199afb17d5a8043d99e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:05,038 INFO [StoreOpener-4521951792b1f21ac39ab04bdc740321-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4521951792b1f21ac39ab04bdc740321 columnFamilyName cf 2024-11-21T22:02:05,038 DEBUG [StoreOpener-4521951792b1f21ac39ab04bdc740321-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:05,038 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,039 INFO [StoreOpener-4521951792b1f21ac39ab04bdc740321-1 {}] regionserver.HStore(327): Store=4521951792b1f21ac39ab04bdc740321/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:05,040 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,040 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,042 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,042 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,042 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,042 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,043 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,044 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,044 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,046 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] 
regionserver.HRegion(1093): writing seq id for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,048 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,055 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:05,055 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:05,056 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 4521951792b1f21ac39ab04bdc740321; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67105711, jitterRate=-4.698336124420166E-5}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:05,056 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,056 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 99d725fca8429199afb17d5a8043d99e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62781972, jitterRate=-0.06447571516036987}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:05,056 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,057 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 4521951792b1f21ac39ab04bdc740321: Running coprocessor pre-open hook at 1732226525028Writing region info on filesystem at 1732226525029 (+1 ms)Initializing all the Stores at 1732226525035 (+6 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226525036 (+1 ms)Cleaning up temporary data from old regions at 1732226525044 (+8 ms)Running coprocessor post-open hooks at 1732226525056 (+12 ms)Region opened successfully at 1732226525057 (+1 ms) 2024-11-21T22:02:05,057 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 99d725fca8429199afb17d5a8043d99e: Running coprocessor pre-open hook at 1732226525025Writing region info on filesystem at 1732226525025Initializing all the Stores at 
1732226525028 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226525028Cleaning up temporary data from old regions at 1732226525042 (+14 ms)Running coprocessor post-open hooks at 1732226525056 (+14 ms)Region opened successfully at 1732226525057 (+1 ms) 2024-11-21T22:02:05,060 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321., pid=11, masterSystemTime=1732226524993 2024-11-21T22:02:05,062 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e., pid=10, masterSystemTime=1732226524990 2024-11-21T22:02:05,065 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:05,065 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:05,066 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=4521951792b1f21ac39ab04bdc740321, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:05,066 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:05,067 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 
2024-11-21T22:02:05,069 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=99d725fca8429199afb17d5a8043d99e, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:05,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:05,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:02:05,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-21T22:02:05,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126 in 240 msec 2024-11-21T22:02:05,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-21T22:02:05,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, ASSIGN in 428 msec 2024-11-21T22:02:05,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979 in 247 msec 2024-11-21T22:02:05,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-21T22:02:05,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, ASSIGN in 432 msec 2024-11-21T22:02:05,095 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:02:05,095 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226525095"}]},"ts":"1732226525095"} 2024-11-21T22:02:05,099 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-21T22:02:05,100 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:02:05,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T22:02:05,105 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-21T22:02:05,119 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:05,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51937, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,141 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:05,141 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:05,143 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,147 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:05,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42503, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,156 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T22:02:05,234 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:05,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:05,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:05,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-21T22:02:05,234 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:02:05,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:02:05,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:05,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:02:05,258 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:05,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:05,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:05,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:05,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:05,260 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:05,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 795 msec 2024-11-21T22:02:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T22:02:05,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T22:02:05,612 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. 
Timeout = 60000ms 2024-11-21T22:02:05,613 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:05,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-21T22:02:05,619 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:05,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-11-21T22:02:05,623 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T22:02:05,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T22:02:05,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226525636 (current time:1732226525636). 2024-11-21T22:02:05,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:05,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-21T22:02:05,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:05,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28bf87d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:05,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:05,640 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:05,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:05,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:05,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66475cfd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-21T22:02:05,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:05,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:05,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,642 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:05,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aab267, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:05,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:05,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52874, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,655 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a2b13c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:05,657 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:05,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:05,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:05,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec922ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:05,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:05,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:05,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,659 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:05,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50101877, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:05,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:05,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,663 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:05,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:05,668 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:05,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:05,670 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T22:02:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-21T22:02:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T22:02:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-21T22:02:05,683 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T22:02:05,688 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:05,701 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:05,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741845_1021 (size=167) 2024-11-21T22:02:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741845_1021 (size=167) 2024-11-21T22:02:05,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741845_1021 (size=167) 2024-11-21T22:02:05,716 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:05,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321}] 2024-11-21T22:02:05,722 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,723 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,791 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T22:02:05,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-21T22:02:05,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-21T22:02:05,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:05,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:05,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 99d725fca8429199afb17d5a8043d99e: 2024-11-21T22:02:05,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 4521951792b1f21ac39ab04bdc740321: 2024-11-21T22:02:05,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. for emptySnaptb0-testExportWithTargetName completed. 2024-11-21T22:02:05,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. for emptySnaptb0-testExportWithTargetName completed. 2024-11-21T22:02:05,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:05,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:05,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:05,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:05,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:05,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:05,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741846_1022 (size=70) 2024-11-21T22:02:05,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741846_1022 (size=70) 2024-11-21T22:02:05,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741846_1022 (size=70) 2024-11-21T22:02:05,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:05,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-21T22:02:05,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-21T22:02:05,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,943 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:05,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e in 227 msec 2024-11-21T22:02:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741847_1023 (size=70) 2024-11-21T22:02:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741847_1023 (size=70) 2024-11-21T22:02:05,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741847_1023 (size=70) 2024-11-21T22:02:05,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 
2024-11-21T22:02:05,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-21T22:02:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-21T22:02:05,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,955 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:05,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-21T22:02:05,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 in 239 msec 2024-11-21T22:02:05,963 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:05,965 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:05,970 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:05,970 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:05,975 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:06,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T22:02:06,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741848_1024 (size=549) 2024-11-21T22:02:06,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741848_1024 (size=549) 2024-11-21T22:02:06,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741848_1024 (size=549) 2024-11-21T22:02:06,027 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, 
snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:06,045 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:06,046 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:06,050 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:06,050 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-21T22:02:06,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 373 msec 2024-11-21T22:02:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T22:02:06,311 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T22:02:06,326 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='09223e976277c693b6904ee9868e8b7ce', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:06,328 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='194bac6dcd3d5c776a9e68a8c46dc7f1f', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:02:06,336 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:06,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. with WAL disabled. Data may be lost in the event of a crash. 
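[Editor's note] The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above are what a region server logs when a client writes with WAL durability turned off. A minimal sketch of such a write follows; the column family "cf" comes from the log, while the row key, qualifier, and value are assumptions for illustration, not the test's actual data.

    // Illustrative only: a Put that skips the WAL, which is what triggers the
    // "with WAL disabled" warning seen in the log entries above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0"))                       // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),         // "cf" is the family from the log
                  Bytes.toBytes("value"));                                // hypothetical value
          put.setDurability(Durability.SKIP_WAL); // write without the WAL, as in the logged warning
          table.put(put);
        }
      }
    }

Skipping the WAL trades crash safety for write throughput, which is why the region server emits the warning; the entries that follow show the test continuing with the second, non-empty snapshot request.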
2024-11-21T22:02:06,341 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:06,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:06,352 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T22:02:06,357 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-21T22:02:06,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:06,359 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:06,362 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T22:02:06,378 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T22:02:06,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T22:02:06,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T22:02:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226526392 (current time:1732226526392). 
2024-11-21T22:02:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-21T22:02:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bf342fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:06,395 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:06,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:06,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:06,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5513adfd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:06,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:06,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,397 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35854, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:06,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5497c4af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:06,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:06,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:06,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:06,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:02:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df4b460, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:06,407 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:06,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:06,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:06,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e1ee352, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:06,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:06,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,409 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:06,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c1d51fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:06,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:06,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:06,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:02:06,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:06,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:06,417 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:06,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T22:02:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:02:06,422 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T22:02:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-21T22:02:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T22:02:06,425 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:06,427 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:06,432 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741849_1025 (size=162) 2024-11-21T22:02:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741849_1025 (size=162) 2024-11-21T22:02:06,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741849_1025 (size=162) 2024-11-21T22:02:06,469 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:06,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321}] 2024-11-21T22:02:06,471 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:06,472 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:06,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T22:02:06,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-21T22:02:06,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-21T22:02:06,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:06,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 
2024-11-21T22:02:06,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 99d725fca8429199afb17d5a8043d99e 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-21T22:02:06,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 4521951792b1f21ac39ab04bdc740321 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-21T22:02:06,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/.tmp/cf/d6c3f85feb04481f85e3429084379cf5 is 71, key is 21f2dd409b8b44b401746417b16ea06c/cf:q/1732226526347/Put/seqid=0 2024-11-21T22:02:06,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/.tmp/cf/90aec1cbb1cb47e884307f2a6237af8f is 71, key is 0173dc8e8e480cf1b3819d22a98053e6/cf:q/1732226526337/Put/seqid=0 2024-11-21T22:02:06,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T22:02:06,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741850_1026 (size=8190) 2024-11-21T22:02:06,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741850_1026 (size=8190) 2024-11-21T22:02:06,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741850_1026 (size=8190) 2024-11-21T22:02:06,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/.tmp/cf/d6c3f85feb04481f85e3429084379cf5 2024-11-21T22:02:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741851_1027 (size=5422) 2024-11-21T22:02:06,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741851_1027 (size=5422) 2024-11-21T22:02:06,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741851_1027 (size=5422) 2024-11-21T22:02:06,796 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/.tmp/cf/90aec1cbb1cb47e884307f2a6237af8f 2024-11-21T22:02:06,891 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/.tmp/cf/90aec1cbb1cb47e884307f2a6237af8f as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f 2024-11-21T22:02:06,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/.tmp/cf/d6c3f85feb04481f85e3429084379cf5 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5 2024-11-21T22:02:06,910 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5, entries=45, sequenceid=6, filesize=8.0 K 2024-11-21T22:02:06,923 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f, entries=5, sequenceid=6, filesize=5.3 K 2024-11-21T22:02:06,925 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 4521951792b1f21ac39ab04bdc740321 in 289ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:06,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-21T22:02:06,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 4521951792b1f21ac39ab04bdc740321: 2024-11-21T22:02:06,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. for snaptb0-testExportWithTargetName completed. 2024-11-21T22:02:06,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-21T22:02:06,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:06,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5] hfiles 2024-11-21T22:02:06,934 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 99d725fca8429199afb17d5a8043d99e in 308ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:06,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 99d725fca8429199afb17d5a8043d99e: 2024-11-21T22:02:06,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. for snaptb0-testExportWithTargetName completed. 2024-11-21T22:02:06,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-21T22:02:06,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:06,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f] hfiles 2024-11-21T22:02:06,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f for snapshot=snaptb0-testExportWithTargetName 2024-11-21T22:02:06,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5 for snapshot=snaptb0-testExportWithTargetName 2024-11-21T22:02:06,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741853_1029 (size=109) 2024-11-21T22:02:06,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36581 is added to blk_1073741853_1029 (size=109) 2024-11-21T22:02:06,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741853_1029 (size=109) 2024-11-21T22:02:06,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741852_1028 (size=109) 2024-11-21T22:02:06,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741852_1028 (size=109) 2024-11-21T22:02:06,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741852_1028 (size=109) 2024-11-21T22:02:06,986 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:06,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-21T22:02:06,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-21T22:02:06,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:06,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:06,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-21T22:02:06,988 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:06,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-21T22:02:06,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:06,989 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:06,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99d725fca8429199afb17d5a8043d99e in 522 msec 2024-11-21T22:02:07,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-21T22:02:07,015 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:07,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4521951792b1f21ac39ab04bdc740321 in 522 msec 2024-11-21T22:02:07,019 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:07,030 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:07,030 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-21T22:02:07,031 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-21T22:02:07,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T22:02:07,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741854_1030 (size=627) 2024-11-21T22:02:07,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741854_1030 (size=627) 2024-11-21T22:02:07,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741854_1030 (size=627) 2024-11-21T22:02:07,113 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:07,144 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:07,145 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-21T22:02:07,149 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 
2024-11-21T22:02:07,149 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-21T22:02:07,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 728 msec 2024-11-21T22:02:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T22:02:07,562 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T22:02:07,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563 2024-11-21T22:02:07,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:07,616 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:07,616 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-21T22:02:07,626 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
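At this point the FLUSH-type snapshot procedure (pid=15) has finished in 728 msec and the client logs the SNAPSHOT operation as completed; the repeated "Checking to see if procedure is done pid=15" entries are the blocking client call polling the master while the procedure state machine runs. A synchronous snapshot of this kind is normally requested through the Admin API. The sketch below is illustrative only: the snapshot and table names are taken from the log, the rest is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (the state machine logged above)
      // reports completion; FLUSH means regions are flushed before the snapshot is taken.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
      // List snapshots to confirm the new one is registered.
      admin.listSnapshots().forEach(s ->
          System.out.println(s.getName() + " on " + s.getTableName()));
    }
  }
}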
2024-11-21T22:02:07,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-21T22:02:07,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741855_1031 (size=162) 2024-11-21T22:02:07,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741855_1031 (size=162) 2024-11-21T22:02:07,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741855_1031 (size=162) 2024-11-21T22:02:07,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741856_1032 (size=627) 2024-11-21T22:02:07,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741856_1032 (size=627) 2024-11-21T22:02:07,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741856_1032 (size=627) 2024-11-21T22:02:08,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741857_1033 (size=154) 2024-11-21T22:02:08,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741857_1033 (size=154) 2024-11-21T22:02:08,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741857_1033 (size=154) 2024-11-21T22:02:08,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:08,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:08,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-4097299344005249118.jar 2024-11-21T22:02:09,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-1163177773611382858.jar 2024-11-21T22:02:09,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:09,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:02:09,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:02:09,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:02:09,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-11-21T22:02:09,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:02:09,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:02:09,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:02:09,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:02:09,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:02:09,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:02:09,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:02:09,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:09,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:09,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:09,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:09,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-21T22:02:09,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:09,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:09,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741858_1034 (size=131440) 2024-11-21T22:02:09,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741858_1034 (size=131440) 2024-11-21T22:02:09,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741858_1034 (size=131440) 2024-11-21T22:02:09,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T22:02:09,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T22:02:09,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T22:02:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T22:02:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T22:02:10,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T22:02:10,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741861_1037 (size=903739) 2024-11-21T22:02:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741861_1037 (size=903739) 2024-11-21T22:02:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741861_1037 (size=903739) 2024-11-21T22:02:10,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T22:02:10,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T22:02:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T22:02:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741863_1039 (size=1877034) 
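The run of "For class X, using jar Y" entries is TableMapReduceUtil resolving, for each class the export job depends on, the jar that provides it and attaching that jar to the MapReduce job; the addStoredBlock entries that follow are those jars being written into HDFS for the job's distributed cache. A minimal sketch of the same mechanism is below; it is not part of the log, the job name is an assumption, and "tmpjars" is simply the Hadoop property the resolved jars end up in.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-deps-demo");
    // Resolves the jar backing each HBase/Hadoop class the job needs and ships it
    // with the job -- the same resolution that is logged line by line above.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
  }
}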
2024-11-21T22:02:10,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741863_1039 (size=1877034) 2024-11-21T22:02:10,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741863_1039 (size=1877034) 2024-11-21T22:02:10,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741864_1040 (size=77835) 2024-11-21T22:02:10,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741864_1040 (size=77835) 2024-11-21T22:02:10,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741864_1040 (size=77835) 2024-11-21T22:02:10,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741865_1041 (size=30949) 2024-11-21T22:02:10,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741865_1041 (size=30949) 2024-11-21T22:02:10,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741865_1041 (size=30949) 2024-11-21T22:02:10,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741866_1042 (size=1597347) 2024-11-21T22:02:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741866_1042 (size=1597347) 2024-11-21T22:02:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741866_1042 (size=1597347) 2024-11-21T22:02:10,528 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:02:10,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741867_1043 (size=4695811) 2024-11-21T22:02:10,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741867_1043 (size=4695811) 2024-11-21T22:02:10,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741867_1043 (size=4695811) 2024-11-21T22:02:10,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741868_1044 (size=232957) 2024-11-21T22:02:10,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741868_1044 (size=232957) 2024-11-21T22:02:10,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741868_1044 (size=232957) 2024-11-21T22:02:10,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741869_1045 (size=127628) 2024-11-21T22:02:10,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741869_1045 
(size=127628) 2024-11-21T22:02:10,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741869_1045 (size=127628) 2024-11-21T22:02:10,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741870_1046 (size=20406) 2024-11-21T22:02:10,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741870_1046 (size=20406) 2024-11-21T22:02:10,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741870_1046 (size=20406) 2024-11-21T22:02:11,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741871_1047 (size=5175431) 2024-11-21T22:02:11,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741871_1047 (size=5175431) 2024-11-21T22:02:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741871_1047 (size=5175431) 2024-11-21T22:02:11,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741872_1048 (size=440656) 2024-11-21T22:02:11,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741872_1048 (size=440656) 2024-11-21T22:02:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741872_1048 (size=440656) 2024-11-21T22:02:11,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741873_1049 (size=217634) 2024-11-21T22:02:11,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741873_1049 (size=217634) 2024-11-21T22:02:11,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741873_1049 (size=217634) 2024-11-21T22:02:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T22:02:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T22:02:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T22:02:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741875_1051 (size=322274) 2024-11-21T22:02:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741875_1051 (size=322274) 2024-11-21T22:02:11,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741875_1051 (size=322274) 2024-11-21T22:02:11,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to 
blk_1073741876_1052 (size=503880) 2024-11-21T22:02:11,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741876_1052 (size=503880) 2024-11-21T22:02:11,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741876_1052 (size=503880) 2024-11-21T22:02:11,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741877_1053 (size=6424742) 2024-11-21T22:02:11,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741877_1053 (size=6424742) 2024-11-21T22:02:11,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741877_1053 (size=6424742) 2024-11-21T22:02:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741878_1054 (size=29229) 2024-11-21T22:02:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741878_1054 (size=29229) 2024-11-21T22:02:11,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741878_1054 (size=29229) 2024-11-21T22:02:11,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741879_1055 (size=24096) 2024-11-21T22:02:11,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741879_1055 (size=24096) 2024-11-21T22:02:11,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741879_1055 (size=24096) 2024-11-21T22:02:11,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741880_1056 (size=111872) 2024-11-21T22:02:11,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741880_1056 (size=111872) 2024-11-21T22:02:11,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741880_1056 (size=111872) 2024-11-21T22:02:11,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741881_1057 (size=45609) 2024-11-21T22:02:11,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741881_1057 (size=45609) 2024-11-21T22:02:11,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741881_1057 (size=45609) 2024-11-21T22:02:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741882_1058 (size=136454) 2024-11-21T22:02:11,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741882_1058 (size=136454) 2024-11-21T22:02:11,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to 
blk_1073741882_1058 (size=136454) 2024-11-21T22:02:11,880 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:02:11,889 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-21T22:02:11,908 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:02:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741883_1059 (size=342) 2024-11-21T22:02:11,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741883_1059 (size=342) 2024-11-21T22:02:11,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741883_1059 (size=342) 2024-11-21T22:02:11,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741884_1060 (size=15) 2024-11-21T22:02:11,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741884_1060 (size=15) 2024-11-21T22:02:11,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741884_1060 (size=15) 2024-11-21T22:02:12,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741885_1061 (size=303735) 2024-11-21T22:02:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741885_1061 (size=303735) 2024-11-21T22:02:12,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741885_1061 (size=303735) 2024-11-21T22:02:12,639 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:02:12,640 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:02:12,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0001_000001 (auth:SIMPLE) from 127.0.0.1:42576 2024-11-21T22:02:14,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-21T22:02:14,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:21,086 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0001_000001 (auth:SIMPLE) from 127.0.0.1:60158 2024-11-21T22:02:21,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741886_1062 (size=349385) 2024-11-21T22:02:21,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741886_1062 (size=349385) 2024-11-21T22:02:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741886_1062 (size=349385) 2024-11-21T22:02:23,021 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:02:23,478 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0001_000001 (auth:SIMPLE) from 127.0.0.1:44188 2024-11-21T22:02:30,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741887_1063 (size=8190) 2024-11-21T22:02:30,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741887_1063 (size=8190) 2024-11-21T22:02:30,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741887_1063 (size=8190) 2024-11-21T22:02:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741888_1064 (size=5422) 2024-11-21T22:02:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741888_1064 (size=5422) 2024-11-21T22:02:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741888_1064 (size=5422) 2024-11-21T22:02:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741889_1065 (size=17419) 2024-11-21T22:02:31,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741889_1065 (size=17419) 2024-11-21T22:02:31,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741889_1065 (size=17419) 2024-11-21T22:02:31,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35185 is added to blk_1073741890_1066 (size=464) 2024-11-21T22:02:31,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741890_1066 (size=464) 2024-11-21T22:02:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741890_1066 (size=464) 2024-11-21T22:02:31,132 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000002/launch_container.sh] 2024-11-21T22:02:31,132 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000002/container_tokens] 2024-11-21T22:02:31,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000002/sysfs] 2024-11-21T22:02:31,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741891_1067 (size=17419) 2024-11-21T22:02:31,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741891_1067 (size=17419) 2024-11-21T22:02:31,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741891_1067 (size=17419) 2024-11-21T22:02:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741892_1068 (size=349385) 2024-11-21T22:02:31,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741892_1068 (size=349385) 2024-11-21T22:02:31,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741892_1068 (size=349385) 2024-11-21T22:02:31,263 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0001_000001 (auth:SIMPLE) from 127.0.0.1:44202 2024-11-21T22:02:32,869 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:02:32,870 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
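Context for the export entries above: the log shows the MapReduce-driven snapshot export finishing ("Finalize the Snapshot Export", then verification of the exported copy's .snapshotinfo and data.manifest). A minimal sketch of how such an export is typically driven, assuming HBase's ExportSnapshot tool with illustrative paths and flag spellings (check the tool's help output for your HBase version; the target option renames the snapshot at the destination, matching snaptb0-testExportWithTargetName being exported as testExportWithTargetName):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithTargetNameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Export snapshot "snaptb0-testExportWithTargetName" to another HDFS root,
    // renaming it to "testExportWithTargetName" at the destination.
    // The destination URI below is a placeholder, not the path from this log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithTargetName",
        "--target", "testExportWithTargetName",
        "--copy-to", "hdfs://namenode:8020/hbase-export"
    });
    System.exit(rc);
  }
}

The tool submits a MapReduce job (hence the JobResourceUploader warning and the block-report entries above) that copies the snapshot's HFiles and manifest to the destination filesystem.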
2024-11-21T22:02:32,879 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-21T22:02:32,879 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:02:32,880 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:02:32,880 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-21T22:02:32,881 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-21T22:02:32,881 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-21T22:02:32,881 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/testExportWithTargetName 2024-11-21T22:02:32,882 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-21T22:02:32,882 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226527563/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-21T22:02:32,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-21T22:02:32,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-21T22:02:32,910 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226552910"}]},"ts":"1732226552910"} 2024-11-21T22:02:32,913 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-21T22:02:32,913 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-21T22:02:32,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-21T22:02:32,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, UNASSIGN}] 2024-11-21T22:02:32,925 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, UNASSIGN 2024-11-21T22:02:32,925 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, UNASSIGN 2024-11-21T22:02:32,928 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=4521951792b1f21ac39ab04bdc740321, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:32,928 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=99d725fca8429199afb17d5a8043d99e, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:32,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, UNASSIGN because future has completed 2024-11-21T22:02:32,932 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:32,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:02:32,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, UNASSIGN because future has completed 2024-11-21T22:02:32,938 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=2a884b411602,44629,1732226515126, table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
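The entries from "Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName" onward trace the master-side DisableTableProcedure (pid=18) and its per-region UNASSIGN subprocedures. On the client side this whole sequence is triggered by a single Admin call; a minimal sketch, assuming a standard client configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Kicks off the DisableTableProcedure seen above: the table state in hbase:meta
      // is set to DISABLING, each region is closed via a CloseRegionProcedure,
      // and the state then flips to DISABLED.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}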
2024-11-21T22:02:32,938 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:32,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:33,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-21T22:02:33,093 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(122): Close 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:33,093 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:33,094 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing 99d725fca8429199afb17d5a8043d99e, disabling compactions & flushes 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 4521951792b1f21ac39ab04bdc740321, disabling compactions & flushes 2024-11-21T22:02:33,094 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:33,094 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. after waiting 0 ms 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 
after waiting 0 ms 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:33,094 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 2024-11-21T22:02:33,100 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:02:33,100 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:02:33,103 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:33,103 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:33,103 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321. 2024-11-21T22:02:33,103 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e. 
2024-11-21T22:02:33,104 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for 99d725fca8429199afb17d5a8043d99e: Waiting for close lock at 1732226553094Running coprocessor pre-close hooks at 1732226553094Disabling compacts and flushes for region at 1732226553094Disabling writes for close at 1732226553094Writing region close event to WAL at 1732226553096 (+2 ms)Running coprocessor post-close hooks at 1732226553101 (+5 ms)Closed at 1732226553103 (+2 ms) 2024-11-21T22:02:33,104 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 4521951792b1f21ac39ab04bdc740321: Waiting for close lock at 1732226553094Running coprocessor pre-close hooks at 1732226553094Disabling compacts and flushes for region at 1732226553094Disabling writes for close at 1732226553094Writing region close event to WAL at 1732226553096 (+2 ms)Running coprocessor post-close hooks at 1732226553101 (+5 ms)Closed at 1732226553103 (+2 ms) 2024-11-21T22:02:33,107 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed 99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:33,107 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=99d725fca8429199afb17d5a8043d99e, regionState=CLOSED 2024-11-21T22:02:33,108 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:33,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=4521951792b1f21ac39ab04bdc740321, regionState=CLOSED 2024-11-21T22:02:33,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:02:33,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:33,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=20 2024-11-21T22:02:33,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 99d725fca8429199afb17d5a8043d99e, server=2a884b411602,41399,1732226514979 in 178 msec 2024-11-21T22:02:33,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=21 2024-11-21T22:02:33,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure 4521951792b1f21ac39ab04bdc740321, server=2a884b411602,44629,1732226515126 in 175 msec 2024-11-21T22:02:33,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=99d725fca8429199afb17d5a8043d99e, UNASSIGN in 191 msec 2024-11-21T22:02:33,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=21, resume processing ppid=19 2024-11-21T22:02:33,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4521951792b1f21ac39ab04bdc740321, UNASSIGN in 193 msec 2024-11-21T22:02:33,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-21T22:02:33,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 204 msec 2024-11-21T22:02:33,125 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226553124"}]},"ts":"1732226553124"} 2024-11-21T22:02:33,127 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-21T22:02:33,127 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-21T22:02:33,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 227 msec 2024-11-21T22:02:33,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-21T22:02:33,233 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T22:02:33,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-21T22:02:33,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,250 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-21T22:02:33,253 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,257 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-21T22:02:33,262 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:33,262 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:33,266 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/recovered.edits] 2024-11-21T22:02:33,266 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/recovered.edits] 2024-11-21T22:02:33,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,270 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-21T22:02:33,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-21T22:02:33,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-21T22:02:33,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-21T22:02:33,273 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/cf/d6c3f85feb04481f85e3429084379cf5 2024-11-21T22:02:33,273 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/cf/90aec1cbb1cb47e884307f2a6237af8f 2024-11-21T22:02:33,276 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e/recovered.edits/9.seqid 2024-11-21T22:02:33,276 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321/recovered.edits/9.seqid 2024-11-21T22:02:33,277 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/4521951792b1f21ac39ab04bdc740321 2024-11-21T22:02:33,277 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithTargetName/99d725fca8429199afb17d5a8043d99e 2024-11-21T22:02:33,277 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,279 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,279 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,281 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-21T22:02:33,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-21T22:02:33,289 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-21T22:02:33,293 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-21T22:02:33,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-21T22:02:33,295 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226553295"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:33,295 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226553295"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:33,298 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:02:33,298 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 99d725fca8429199afb17d5a8043d99e, NAME => 'testtb-testExportWithTargetName,,1732226524464.99d725fca8429199afb17d5a8043d99e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4521951792b1f21ac39ab04bdc740321, NAME => 'testtb-testExportWithTargetName,1,1732226524464.4521951792b1f21ac39ab04bdc740321.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:02:33,298 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
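After the table is disabled, the log shows a DeleteTableProcedure (pid=24) archiving both region directories and removing the table from hbase:meta, followed by two MasterRpcServices "delete name: ..." requests that drop the snapshots. A minimal sketch of the corresponding client-side cleanup, assuming the same Admin connection style as in the previous sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // DeleteTableProcedure: region data is moved under /archive and the
      // region rows plus the table state row are deleted from hbase:meta.
      admin.deleteTable(table);
      // Matches the two snapshot deletions logged right after the table drop.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}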
2024-11-21T22:02:33,299 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226553298"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:33,301 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-21T22:02:33,302 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T22:02:33,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 57 msec 2024-11-21T22:02:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-21T22:02:33,392 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-21T22:02:33,393 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T22:02:33,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-21T22:02:33,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-21T22:02:33,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-21T22:02:33,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-21T22:02:33,458 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=772 (was 719) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:34938 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:53088 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:44845 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1257 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1542805113_1 at /127.0.0.1:59004 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 111816) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46357 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:46357 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44845 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:57228 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=806 (was 779) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=541 (was 336) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=1238 (was 3713) 2024-11-21T22:02:33,459 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=772 is superior to 500 2024-11-21T22:02:33,475 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=772, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=541, ProcessCount=19, AvailableMemoryMB=1236 2024-11-21T22:02:33,475 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=772 is superior to 500 2024-11-21T22:02:33,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:02:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:33,479 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:02:33,480 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:33,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-21T22:02:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T22:02:33,481 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:02:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741893_1069 (size=404) 2024-11-21T22:02:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741893_1069 (size=404) 2024-11-21T22:02:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741893_1069 (size=404) 2024-11-21T22:02:33,491 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 63ab513b366f9d889c2e0bf42478ccae, NAME => 'testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:33,492 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ccc8827a3646c7a343b322db3b424528, NAME => 'testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741895_1071 (size=65) 2024-11-21T22:02:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741895_1071 (size=65) 2024-11-21T22:02:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741894_1070 (size=65) 2024-11-21T22:02:33,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741895_1071 (size=65) 2024-11-21T22:02:33,506 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:33,506 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 63ab513b366f9d889c2e0bf42478ccae, disabling compactions & flushes 2024-11-21T22:02:33,506 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,506 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,506 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. after waiting 0 ms 2024-11-21T22:02:33,506 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,507 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 
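The long "Potentially hanging thread" listing above, together with the Thread=772 / OpenFileDescriptor=806 deltas, is HBase's per-test ResourceChecker diffing the JVM before and after a test and printing the stacks of whatever survived. A minimal sketch of the same idea using only JDK calls is shown below; the class and method names are illustrative and this is not the ResourceChecker's actual implementation.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Illustrative thread-leak check in the spirit of HBase's ResourceChecker (not its real code). */
public final class ThreadLeakCheck {
  private final Set<Long> before = new HashSet<>();

  /** Record the ids of all threads that are alive before the test runs. */
  public void snapshot() {
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      before.add(t.getId());
    }
  }

  /** After the test, dump every thread that was not present in the snapshot. */
  public void reportLeaks() {
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      Thread t = e.getKey();
      if (before.contains(t.getId())) {
        continue; // existed before the test; not a leak candidate
      }
      System.out.println("Potentially hanging thread: " + t.getName());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}

The real checker additionally tracks open file descriptors, process count and system load average, which is where the other "LEAK?" counters in the report above come from.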
2024-11-21T22:02:33,507 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 63ab513b366f9d889c2e0bf42478ccae: Waiting for close lock at 1732226553506Disabling compacts and flushes for region at 1732226553506Disabling writes for close at 1732226553506Writing region close event to WAL at 1732226553506Closed at 1732226553506 2024-11-21T22:02:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741894_1070 (size=65) 2024-11-21T22:02:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741894_1070 (size=65) 2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ccc8827a3646c7a343b322db3b424528, disabling compactions & flushes 2024-11-21T22:02:33,509 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. after waiting 0 ms 2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:33,509 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 
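The create 'testtb-testExportWithResetTtl' request logged by HMaster above describes a table with a single 'cf' family (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks) and, judging by the two regions split at key '1', a pre-split table. A rough equivalent through the public HBase Java client API could look like the sketch below; it is not the test's actual code, and the connection handling is ordinary client boilerplate rather than anything taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/** Illustrative creation of a table matching the descriptor shown in the log above. */
public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                    // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)    // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)              // BLOCKSIZE => 65536 B (64KB)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setRegionReplication(1)              // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      // Split key '1' reproduces the two regions ('' -> '1' and '1' -> '') created in the log.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}

admin.createTable blocks until the CreateTableProcedure reaches SUCCESS, which is the procedure (pid=25) whose state transitions the surrounding lines trace.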
2024-11-21T22:02:33,509 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ccc8827a3646c7a343b322db3b424528: Waiting for close lock at 1732226553509Disabling compacts and flushes for region at 1732226553509Disabling writes for close at 1732226553509Writing region close event to WAL at 1732226553509Closed at 1732226553509 2024-11-21T22:02:33,511 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:02:33,511 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226553511"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226553511"}]},"ts":"1732226553511"} 2024-11-21T22:02:33,511 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226553511"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226553511"}]},"ts":"1732226553511"} 2024-11-21T22:02:33,515 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:02:33,517 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:02:33,517 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226553517"}]},"ts":"1732226553517"} 2024-11-21T22:02:33,520 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-21T22:02:33,520 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:02:33,522 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:02:33,522 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:02:33,522 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:02:33,522 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:02:33,523 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:02:33,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, ASSIGN}] 2024-11-21T22:02:33,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, ASSIGN 2024-11-21T22:02:33,525 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, ASSIGN 2024-11-21T22:02:33,526 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:02:33,526 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:02:33,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T22:02:33,677 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
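The repeated "Checking to see if procedure is done pid=25" lines are the master answering the client's polling while CreateTableProcedure walks its states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, ...). Client code that wants the same guarantee without referring to procedure ids can simply wait for table availability; the loop below is an illustrative sketch, and the timeout and sleep interval are arbitrary choices of this example.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

/** Illustrative wait loop: block until a newly created table is reported available. */
public final class WaitForTable {
  public static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Admin.isTableAvailable() turns true once all regions of the table are assigned and open.
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + table + " not available after " + timeoutMs + " ms");
      }
      Thread.sleep(200); // mirrors the client's periodic "is procedure done" polling
    }
  }
}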
2024-11-21T22:02:33,677 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=63ab513b366f9d889c2e0bf42478ccae, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:33,677 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=ccc8827a3646c7a343b322db3b424528, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:33,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, ASSIGN because future has completed 2024-11-21T22:02:33,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:33,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, ASSIGN because future has completed 2024-11-21T22:02:33,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:02:33,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T22:02:33,843 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,844 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => 63ab513b366f9d889c2e0bf42478ccae, NAME => 'testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:02:33,844 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. service=AccessControlService 2024-11-21T22:02:33,844 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:02:33,845 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,845 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:33,845 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,845 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,851 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:33,851 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => ccc8827a3646c7a343b322db3b424528, NAME => 'testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:02:33,852 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. service=AccessControlService 2024-11-21T22:02:33,852 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
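The AccessControlService registration and the "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" messages on each region open come from the secure mini-cluster this test spins up. Outside the test harness the same coprocessor is normally wired in through configuration; the property keys below are the commonly documented ones, shown here as a sketch rather than this cluster's exact settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/** Illustrative configuration enabling the AccessController coprocessor cluster-wide. */
public final class SecureClusterConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Load the coprocessor on the master, every region and every region server,
    // making the AccessControlService RPC seen in the log available for ACL checks.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    // ACLs are only enforced when authorization is switched on.
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}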
2024-11-21T22:02:33,852 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,852 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:33,852 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,852 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,863 INFO [StoreOpener-63ab513b366f9d889c2e0bf42478ccae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,863 INFO [StoreOpener-ccc8827a3646c7a343b322db3b424528-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,865 INFO [StoreOpener-63ab513b366f9d889c2e0bf42478ccae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 63ab513b366f9d889c2e0bf42478ccae columnFamilyName cf 2024-11-21T22:02:33,865 INFO [StoreOpener-ccc8827a3646c7a343b322db3b424528-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccc8827a3646c7a343b322db3b424528 columnFamilyName cf 2024-11-21T22:02:33,865 DEBUG [StoreOpener-ccc8827a3646c7a343b322db3b424528-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:33,865 DEBUG [StoreOpener-63ab513b366f9d889c2e0bf42478ccae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:33,866 INFO [StoreOpener-ccc8827a3646c7a343b322db3b424528-1 {}] regionserver.HStore(327): Store=ccc8827a3646c7a343b322db3b424528/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:33,866 INFO [StoreOpener-63ab513b366f9d889c2e0bf42478ccae-1 {}] regionserver.HStore(327): Store=63ab513b366f9d889c2e0bf42478ccae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:33,866 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,869 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,869 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,869 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,870 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 
{event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,871 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,875 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:33,876 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened ccc8827a3646c7a343b322db3b424528; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63203180, jitterRate=-0.05819922685623169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:33,876 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:33,875 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:33,877 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for ccc8827a3646c7a343b322db3b424528: Running coprocessor pre-open hook at 1732226553852Writing region info on filesystem at 1732226553852Initializing all the Stores at 1732226553854 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226553854Cleaning up temporary data from old regions at 1732226553868 (+14 ms)Running coprocessor post-open hooks at 1732226553876 (+8 ms)Region opened successfully at 1732226553877 (+1 ms) 2024-11-21T22:02:33,877 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened 63ab513b366f9d889c2e0bf42478ccae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60909532, jitterRate=-0.09237724542617798}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:33,877 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:33,877 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for 63ab513b366f9d889c2e0bf42478ccae: Running coprocessor pre-open hook at 1732226553845Writing region info on filesystem at 1732226553845Initializing all the 
Stores at 1732226553846 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226553846Cleaning up temporary data from old regions at 1732226553869 (+23 ms)Running coprocessor post-open hooks at 1732226553877 (+8 ms)Region opened successfully at 1732226553877 2024-11-21T22:02:33,878 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., pid=29, masterSystemTime=1732226553847 2024-11-21T22:02:33,878 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae., pid=28, masterSystemTime=1732226553838 2024-11-21T22:02:33,881 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,881 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:33,883 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=63ab513b366f9d889c2e0bf42478ccae, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:33,886 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:33,886 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 
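Editor's note: the entries above trace the two regions of testtb-testExportWithResetTtl (split at row key '1', single column family 'cf' with VERSIONS => '1') being opened as part of CreateTableProcedure pid=25. For orientation, a minimal client-side sketch of how such a table could be created follows; only the table name, family name, and split key are taken from the log, while the configuration and connection setup are assumed (standard HBase 2.x+ client API), not something recorded here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml (ZooKeeper quorum, znode parent, ...) is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          // Mirrors the column family logged in the region-open journal: NAME => 'cf', VERSIONS => '1'.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // A single split key ("1") yields two regions, matching the empty-start-key region
      // and the ",1," region that the region servers open in the entries above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```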
2024-11-21T22:02:33,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:33,888 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=ccc8827a3646c7a343b322db3b424528, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:33,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:02:33,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=26 2024-11-21T22:02:33,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126 in 205 msec 2024-11-21T22:02:33,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=27 2024-11-21T22:02:33,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, ASSIGN in 370 msec 2024-11-21T22:02:33,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979 in 201 msec 2024-11-21T22:02:33,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-21T22:02:33,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, ASSIGN in 372 msec 2024-11-21T22:02:33,903 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:02:33,903 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226553903"}]},"ts":"1732226553903"} 2024-11-21T22:02:33,906 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-21T22:02:33,907 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:02:33,908 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-21T22:02:33,914 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T22:02:33,962 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,962 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:33,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:33,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:33,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:33,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:33,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 494 msec 2024-11-21T22:02:34,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T22:02:34,112 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T22:02:34,112 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-21T22:02:34,112 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:34,120 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-21T22:02:34,120 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:34,120 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
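Editor's note: the PermissionStorage and ZKPermissionWatcher entries above show the table owner's ACL (jenkins: RWXCA) being written to hbase:acl and propagated to every region server via ZooKeeper. A hedged sketch of how a client could read those same permissions back is shown below; it assumes the standard AccessControlClient helper and a normally configured connection, neither of which appears in this log.

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadAclSketch {
  // AccessControlClient.getUserPermissions declares "throws Throwable", so main does too.
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Reads the hbase:acl entry whose propagation is logged above
      // ("Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]").
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl");
      perms.forEach(System.out::println);
    }
  }
}
```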
2024-11-21T22:02:34,121 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:34,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T22:02:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226554126 (current time:1732226554126). 2024-11-21T22:02:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T22:02:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30195bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:34,128 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:34,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:34,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:34,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42b03809, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:34,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:34,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,130 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:48404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:34,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@225a6adf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:34,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:34,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:34,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,134 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:02:34,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@730c0159, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:34,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:34,136 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:34,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:34,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:34,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5df3d3cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:34,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:34,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,137 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48414, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:34,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f59aaf1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:34,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:34,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:02:34,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:34,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,143 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:34,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:34,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:34,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,145 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:34,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T22:02:34,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:02:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T22:02:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-21T22:02:34,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T22:02:34,148 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:34,149 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:34,152 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741896_1072 (size=161) 2024-11-21T22:02:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741896_1072 (size=161) 2024-11-21T22:02:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741896_1072 (size=161) 2024-11-21T22:02:34,161 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:34,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528}] 2024-11-21T22:02:34,163 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,163 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T22:02:34,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-21T22:02:34,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-21T22:02:34,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:34,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for ccc8827a3646c7a343b322db3b424528: 2024-11-21T22:02:34,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-21T22:02:34,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:34,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for 63ab513b366f9d889c2e0bf42478ccae: 2024-11-21T22:02:34,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:34,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. for emptySnaptb0-testExportWithResetTtl completed. 
2024-11-21T22:02:34,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:34,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:34,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:34,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:34,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:34,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741897_1073 (size=68) 2024-11-21T22:02:34,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741897_1073 (size=68) 2024-11-21T22:02:34,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741898_1074 (size=68) 2024-11-21T22:02:34,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741898_1074 (size=68) 2024-11-21T22:02:34,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741898_1074 (size=68) 2024-11-21T22:02:34,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:34,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-21T22:02:34,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-21T22:02:34,338 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,338 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741897_1073 (size=68) 2024-11-21T22:02:34,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 
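Editor's note: the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS entries above are the server-side half of the FLUSH snapshot emptySnaptb0-testExportWithResetTtl registered as pid=30. For reference, a snapshot like this can be requested from a client with the blocking Admin call sketched below; only the snapshot and table names come from the log, the rest is an assumed standard HBase client setup.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class EmptySnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (pid=30 above) completes;
      // SnapshotType.FLUSH corresponds to "type=FLUSH ttl=0" in the logged snapshot description.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}
```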
2024-11-21T22:02:34,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-21T22:02:34,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-21T22:02:34,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,341 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae in 179 msec 2024-11-21T22:02:34,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=30 2024-11-21T22:02:34,343 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:34,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 in 180 msec 2024-11-21T22:02:34,344 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:34,345 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:34,345 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:34,345 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:34,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741899_1075 (size=543) 2024-11-21T22:02:34,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741899_1075 (size=543) 2024-11-21T22:02:34,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741899_1075 (size=543) 2024-11-21T22:02:34,359 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:34,367 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:34,367 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:34,369 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:34,369 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-21T22:02:34,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 224 msec 2024-11-21T22:02:34,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-21T22:02:34,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:34,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-21T22:02:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T22:02:34,462 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T22:02:34,468 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0dcdf288897194d589e43492e1146cc68', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:02:34,469 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2bdc9e7a16d786dcb9264d8ad63516cf9', locateType=CURRENT is 
[region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,471 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='3bd350da5078a2f725d18487eed49b22b', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,472 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='149ea347669e530539345576ee7aceb60', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,473 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='4c7bfc893675e160afcec678798a728b5', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,474 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5d92b1dddba17f55143fcc67210df8dd7', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,476 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='65657df4eaddffe5aaefdbf6099e11f8b', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:34,477 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5f7efbd24f41ca3c622661de90f41f5b', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:34,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:34,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:34,491 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-21T22:02:34,491 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 
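Editor's note: the two HRegion(8528) warnings above record puts arriving with the write-ahead log disabled, which is why the server notes that data may be lost on a crash. A client opts into this per mutation via Durability.SKIP_WAL, roughly as in the hedged sketch below; the row key and value are illustrative only and are not taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"))                                  // illustrative row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the WAL: faster writes, but triggers the "Data may be lost in the
      // event of a crash" warning seen in the region server entries above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```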
2024-11-21T22:02:34,491 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:34,493 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:34,499 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:34,507 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:34,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T22:02:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226554511 (current time:1732226554511). 2024-11-21T22:02:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T22:02:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@122b066d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:34,513 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:34,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:34,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:34,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23aefbea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-21T22:02:34,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:34,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:34,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,515 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:34,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19cbe29b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:34,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:34,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37244, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:34,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:34,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:34,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,520 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8d3b53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:34,522 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:34,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:34,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:34,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57df81a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:34,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:34,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,524 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48450, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:34,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4433004f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:34,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:34,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:34,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:34,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:34,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:34,531 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52830, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:34,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:34,532 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T22:02:34,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
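Editor's note: the repeated "MasterRpcServices(1377): Checking to see if procedure is done" entries are the client polling the master for completion of the second snapshot, snaptb0-testExportWithResetTtl (stored as pid=33 below). The public-API analogue of that polling loop looks roughly like the sketch that follows; the snapshot and table names are from the log, while the async-plus-poll pattern and the poll interval are assumptions rather than the test's actual code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      SnapshotDescription snapshot = new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
      admin.snapshotAsync(snapshot);            // returns once the SnapshotProcedure is submitted
      while (!admin.isSnapshotFinished(snapshot)) {
        Thread.sleep(100L);                     // assumed poll interval, for illustration only
      }
    }
  }
}
```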
2024-11-21T22:02:34,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T22:02:34,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-21T22:02:34,536 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T22:02:34,537 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:34,540 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:34,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741900_1076 (size=156) 2024-11-21T22:02:34,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741900_1076 (size=156) 2024-11-21T22:02:34,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741900_1076 (size=156) 2024-11-21T22:02:34,550 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:34,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528}] 2024-11-21T22:02:34,552 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,552 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T22:02:34,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-21T22:02:34,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-21T22:02:34,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:34,708 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing 63ab513b366f9d889c2e0bf42478ccae 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-21T22:02:34,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:34,709 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing ccc8827a3646c7a343b322db3b424528 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-21T22:02:34,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/.tmp/cf/58612bd412c449c1af54c3e77aa8d8e3 is 71, key is 0651041e7ab8d7d3a80246a55e027814/cf:q/1732226554476/Put/seqid=0 2024-11-21T22:02:34,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/.tmp/cf/26c416e42d324faca9f7f6354bc83a19 is 71, key is 2faa495760a7052b5996345d4d870ec0/cf:q/1732226554483/Put/seqid=0 2024-11-21T22:02:34,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741901_1077 (size=5216) 2024-11-21T22:02:34,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741901_1077 (size=5216) 2024-11-21T22:02:34,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741902_1078 (size=8394) 2024-11-21T22:02:34,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741901_1077 (size=5216) 2024-11-21T22:02:34,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741902_1078 (size=8394) 2024-11-21T22:02:34,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741902_1078 
(size=8394) 2024-11-21T22:02:34,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/.tmp/cf/58612bd412c449c1af54c3e77aa8d8e3 2024-11-21T22:02:34,740 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/.tmp/cf/26c416e42d324faca9f7f6354bc83a19 2024-11-21T22:02:34,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/.tmp/cf/58612bd412c449c1af54c3e77aa8d8e3 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3 2024-11-21T22:02:34,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/.tmp/cf/26c416e42d324faca9f7f6354bc83a19 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19 2024-11-21T22:02:34,752 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3, entries=2, sequenceid=6, filesize=5.1 K 2024-11-21T22:02:34,753 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 63ab513b366f9d889c2e0bf42478ccae in 45ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:34,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for 63ab513b366f9d889c2e0bf42478ccae: 2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. for snaptb0-testExportWithResetTtl completed. 
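The two flushes above are driven by the snapshot procedure itself: each region's memstore is written to a new HFile before snapshot references are taken. The same flush can also be requested explicitly through the Admin API; an illustrative fragment, not part of the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushSketch {
  // Flush every region of the table so pending edits land in HFiles on HDFS.
  static void flushTable(Admin admin) throws java.io.IOException {
    admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
  }
}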
2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3] hfiles 2024-11-21T22:02:34,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3 for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19, entries=48, sequenceid=6, filesize=8.2 K 2024-11-21T22:02:34,757 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ccc8827a3646c7a343b322db3b424528 in 47ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for ccc8827a3646c7a343b322db3b424528: 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. for snaptb0-testExportWithResetTtl completed. 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19] hfiles 2024-11-21T22:02:34,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19 for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741903_1079 (size=107) 2024-11-21T22:02:34,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741903_1079 (size=107) 2024-11-21T22:02:34,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741903_1079 (size=107) 2024-11-21T22:02:34,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 
2024-11-21T22:02:34,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-21T22:02:34,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-21T22:02:34,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,765 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:34,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741904_1080 (size=107) 2024-11-21T22:02:34,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 63ab513b366f9d889c2e0bf42478ccae in 217 msec 2024-11-21T22:02:34,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741904_1080 (size=107) 2024-11-21T22:02:34,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741904_1080 (size=107) 2024-11-21T22:02:34,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 
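Both region subprocedures have now reported success; the remaining entries consolidate, verify, and complete the snapshot while the caller keeps issuing the "Checking to see if procedure is done pid=33" RPCs seen in this log. A client-side equivalent of that polling, sketched with the public Admin API under the same assumed setup:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class WaitForSnapshotSketch {
  static void waitForSnapshot(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportWithResetTtl",
        TableName.valueOf("testtb-testExportWithResetTtl"),
        SnapshotType.FLUSH);
    // Asks the master whether the snapshot procedure has completed, mirroring the
    // repeated "Checking to see if procedure is done" calls in the log.
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(100);   // polling interval is an arbitrary choice for the sketch
    }
  }
}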
2024-11-21T22:02:34,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-21T22:02:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-21T22:02:34,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,771 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:34,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-21T22:02:34,775 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:34,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ccc8827a3646c7a343b322db3b424528 in 222 msec 2024-11-21T22:02:34,776 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:34,777 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:34,777 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,778 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741905_1081 (size=621) 2024-11-21T22:02:34,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741905_1081 (size=621) 2024-11-21T22:02:34,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741905_1081 (size=621) 2024-11-21T22:02:34,813 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:34,826 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:34,827 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-21T22:02:34,829 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:34,829 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-21T22:02:34,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 295 msec 2024-11-21T22:02:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T22:02:34,851 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T22:02:34,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:02:34,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:34,856 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:02:34,856 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:34,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-21T22:02:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=36 2024-11-21T22:02:34,858 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:02:34,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741906_1082 (size=397) 2024-11-21T22:02:34,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741906_1082 (size=397) 2024-11-21T22:02:34,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741906_1082 (size=397) 2024-11-21T22:02:34,870 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1104283653669c50fb0250c5091209e0, NAME => 'testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:34,871 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => cfabebbcb8733987892156b399f30287, NAME => 'testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:34,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741907_1083 (size=58) 2024-11-21T22:02:34,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741907_1083 (size=58) 2024-11-21T22:02:34,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741907_1083 (size=58) 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing cfabebbcb8733987892156b399f30287, disabling compactions & flushes 2024-11-21T22:02:34,883 INFO 
[RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. after waiting 0 ms 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:34,883 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:34,883 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for cfabebbcb8733987892156b399f30287: Waiting for close lock at 1732226554883Disabling compacts and flushes for region at 1732226554883Disabling writes for close at 1732226554883Writing region close event to WAL at 1732226554883Closed at 1732226554883 2024-11-21T22:02:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741908_1084 (size=58) 2024-11-21T22:02:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741908_1084 (size=58) 2024-11-21T22:02:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741908_1084 (size=58) 2024-11-21T22:02:34,891 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:34,891 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 1104283653669c50fb0250c5091209e0, disabling compactions & flushes 2024-11-21T22:02:34,891 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:34,891 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:34,892 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. after waiting 0 ms 2024-11-21T22:02:34,892 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:34,892 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 
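The CreateTableProcedure entries around this point lay out 'testExportWithResetTtl' on the filesystem with a single 'cf' family and two regions split at '1'. A client normally requests such a table roughly as sketched below; the builder options shown are the ones visible in the logged descriptor, and everything else is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportWithResetTtl"))
        .setRegionReplication(1)                       // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                         // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)         // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)                       // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    // One split key ('1') yields the two regions seen in the log: ('', '1') and ('1', '').
    byte[][] splitKeys = { Bytes.toBytes("1") };
    admin.createTable(td, splitKeys);
  }
}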
2024-11-21T22:02:34,892 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1104283653669c50fb0250c5091209e0: Waiting for close lock at 1732226554891Disabling compacts and flushes for region at 1732226554891Disabling writes for close at 1732226554892 (+1 ms)Writing region close event to WAL at 1732226554892Closed at 1732226554892 2024-11-21T22:02:34,893 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:02:34,894 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732226554893"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226554893"}]},"ts":"1732226554893"} 2024-11-21T22:02:34,894 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732226554893"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226554893"}]},"ts":"1732226554893"} 2024-11-21T22:02:34,898 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:02:34,899 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:02:34,899 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226554899"}]},"ts":"1732226554899"} 2024-11-21T22:02:34,902 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-21T22:02:34,902 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:02:34,904 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:02:34,904 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:02:34,904 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:02:34,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:02:34,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=1104283653669c50fb0250c5091209e0, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, ASSIGN}] 2024-11-21T22:02:34,906 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, ASSIGN 2024-11-21T22:02:34,906 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, ASSIGN 2024-11-21T22:02:34,907 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:02:34,907 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:02:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T22:02:35,058 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
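With the two new regions placed by the balancer, the test later waits for assignment to finish ("Waiting until all regions of table testExportWithResetTtl get assigned" further down). Outside of the test utility, a similar check can be approximated from region locations; a sketch, with no claim that this is how HBaseTestingUtil implements it:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class AssignmentCheckSketch {
  // Returns true once every region of the table reports a hosting server.
  static boolean allRegionsAssigned(Connection conn) throws java.io.IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        if (loc == null || loc.getServerName() == null) {
          return false;
        }
      }
      return true;
    }
  }
}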
2024-11-21T22:02:35,058 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=cfabebbcb8733987892156b399f30287, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:02:35,058 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=1104283653669c50fb0250c5091209e0, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:35,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, ASSIGN because future has completed 2024-11-21T22:02:35,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:35,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, ASSIGN because future has completed 2024-11-21T22:02:35,065 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:02:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T22:02:35,223 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,223 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => 1104283653669c50fb0250c5091209e0, NAME => 'testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:02:35,224 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. service=AccessControlService 2024-11-21T22:02:35,224 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:02:35,224 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,224 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:35,225 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,225 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,226 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => cfabebbcb8733987892156b399f30287, NAME => 'testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:02:35,227 INFO [StoreOpener-1104283653669c50fb0250c5091209e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. service=AccessControlService 2024-11-21T22:02:35,227 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
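Each opening region registers the AccessControlService coprocessor, which backs the 'jenkins: RWXCA' ACL entries written and cached via ZooKeeper later in this log. Granting permissions of that shape from a client usually goes through AccessControlClient; a hedged sketch, with the user name and actions mirroring the log and the null family/qualifier meaning table-wide scope:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // Grants READ/WRITE/EXEC/CREATE/ADMIN ("RWXCA") on the table to user "jenkins".
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"), "jenkins",
        null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}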
2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,227 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,231 INFO [StoreOpener-1104283653669c50fb0250c5091209e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1104283653669c50fb0250c5091209e0 columnFamilyName cf 2024-11-21T22:02:35,232 DEBUG [StoreOpener-1104283653669c50fb0250c5091209e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:35,232 INFO [StoreOpener-1104283653669c50fb0250c5091209e0-1 {}] regionserver.HStore(327): Store=1104283653669c50fb0250c5091209e0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:35,232 INFO [StoreOpener-cfabebbcb8733987892156b399f30287-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,233 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,234 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,234 INFO [StoreOpener-cfabebbcb8733987892156b399f30287-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cfabebbcb8733987892156b399f30287 columnFamilyName cf 2024-11-21T22:02:35,234 DEBUG [StoreOpener-cfabebbcb8733987892156b399f30287-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:35,234 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,235 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,235 INFO [StoreOpener-cfabebbcb8733987892156b399f30287-1 {}] regionserver.HStore(327): Store=cfabebbcb8733987892156b399f30287/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:35,235 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,235 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,237 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,238 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,238 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,238 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,238 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,242 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,242 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:35,242 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened 1104283653669c50fb0250c5091209e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72468787, jitterRate=0.07986907660961151}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:35,242 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,243 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for 1104283653669c50fb0250c5091209e0: Running coprocessor pre-open hook at 1732226555225Writing region info on filesystem at 1732226555225Initializing all the Stores at 1732226555226 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226555226Cleaning up temporary data from old regions at 1732226555235 (+9 ms)Running coprocessor post-open hooks at 1732226555243 (+8 ms)Region opened successfully at 1732226555243 2024-11-21T22:02:35,245 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:35,245 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0., pid=39, masterSystemTime=1732226555217 2024-11-21T22:02:35,246 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened cfabebbcb8733987892156b399f30287; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64170422, jitterRate=-0.043786197900772095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:35,246 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,246 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for cfabebbcb8733987892156b399f30287: Running coprocessor pre-open hook at 1732226555228Writing region info on filesystem at 1732226555228Initializing all the Stores at 1732226555229 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226555229Cleaning up temporary data from old regions at 1732226555238 (+9 ms)Running coprocessor post-open hooks at 1732226555246 (+8 ms)Region opened successfully at 1732226555246 2024-11-21T22:02:35,248 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287., pid=40, masterSystemTime=1732226555222 2024-11-21T22:02:35,249 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,249 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,249 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=1104283653669c50fb0250c5091209e0, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:35,251 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:35,251 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 
2024-11-21T22:02:35,252 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=cfabebbcb8733987892156b399f30287, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:02:35,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:35,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:02:35,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-21T22:02:35,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126 in 193 msec 2024-11-21T22:02:35,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-21T22:02:35,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, ASSIGN in 354 msec 2024-11-21T22:02:35,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183 in 192 msec 2024-11-21T22:02:35,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=36 2024-11-21T22:02:35,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, ASSIGN in 357 msec 2024-11-21T22:02:35,265 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:02:35,266 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226555266"}]},"ts":"1732226555266"} 2024-11-21T22:02:35,268 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-21T22:02:35,271 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:02:35,271 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-21T22:02:35,276 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T22:02:35,304 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:35,304 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:35,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:35,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 460 msec 2024-11-21T22:02:35,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=36 2024-11-21T22:02:35,482 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-21T22:02:35,482 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-21T22:02:35,482 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:35,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-21T22:02:35,487 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:35,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-21T22:02:35,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:35,496 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0dc19e1e3beba89be9bc136a2ea266128', locateType=CURRENT is [region=testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:02:35,497 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='1507db9ae16cc2e9a40914d3fd65a1c10', locateType=CURRENT is [region=testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:35,498 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='23268872ec633d05ed86595107a436702', locateType=CURRENT is [region=testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:35,500 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3763e8f8775cc2872fb2db425b303e8e5', locateType=CURRENT is [region=testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:35,504 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:35,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:35,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52832, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:35,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-21T22:02:35,514 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:35,517 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-21T22:02:35,517 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,518 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:35,520 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:35,528 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:35,538 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T22:02:35,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-21T22:02:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226555543 (current time:1732226555543). 
2024-11-21T22:02:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T22:02:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5539eff1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:35,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:35,544 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95eed23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:35,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,546 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:35,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79091bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:35,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:35,552 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:35,554 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:35,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,557 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:02:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31010b71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:35,558 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:35,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7250384c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:35,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:35,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,560 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:35,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76bb1625, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:35,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:35,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:35,564 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37268, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:02:35,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:35,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:35,569 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:35,571 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T22:02:35,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:35,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:02:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-21T22:02:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-21T22:02:35,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T22:02:35,578 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:35,579 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:35,585 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741909_1085 (size=143) 2024-11-21T22:02:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741909_1085 (size=143) 2024-11-21T22:02:35,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741909_1085 (size=143) 2024-11-21T22:02:35,609 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-11-21T22:02:35,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1104283653669c50fb0250c5091209e0}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cfabebbcb8733987892156b399f30287}] 2024-11-21T22:02:35,611 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,612 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T22:02:35,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-21T22:02:35,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-21T22:02:35,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 
2024-11-21T22:02:35,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 1104283653669c50fb0250c5091209e0 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-21T22:02:35,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing cfabebbcb8733987892156b399f30287 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-21T22:02:35,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/.tmp/cf/04f2990e879f4cb6a699b9ea14d50f49 is 71, key is 1579397eabde53129ba2627d14bd39b8/cf:q/1732226555511/Put/seqid=0 2024-11-21T22:02:35,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/.tmp/cf/35ed4b0104e84a67847451ef21c03e04 is 71, key is 0334da0d738c8bf559eb582bd1f62b8a/cf:q/1732226555507/Put/seqid=0 2024-11-21T22:02:35,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741910_1086 (size=8122) 2024-11-21T22:02:35,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741910_1086 (size=8122) 2024-11-21T22:02:35,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741910_1086 (size=8122) 2024-11-21T22:02:35,793 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/.tmp/cf/04f2990e879f4cb6a699b9ea14d50f49 2024-11-21T22:02:35,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741911_1087 (size=5490) 2024-11-21T22:02:35,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741911_1087 (size=5490) 2024-11-21T22:02:35,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741911_1087 (size=5490) 2024-11-21T22:02:35,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/.tmp/cf/35ed4b0104e84a67847451ef21c03e04 2024-11-21T22:02:35,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/.tmp/cf/35ed4b0104e84a67847451ef21c03e04 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04 2024-11-21T22:02:35,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04, entries=6, sequenceid=5, filesize=5.4 K 2024-11-21T22:02:35,820 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 1104283653669c50fb0250c5091209e0 in 56ms, sequenceid=5, compaction requested=false 2024-11-21T22:02:35,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-21T22:02:35,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 1104283653669c50fb0250c5091209e0: 2024-11-21T22:02:35,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. for snaptb-testExportWithResetTtl completed. 2024-11-21T22:02:35,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-21T22:02:35,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:35,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04] hfiles 2024-11-21T22:02:35,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04 for snapshot=snaptb-testExportWithResetTtl 2024-11-21T22:02:35,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/.tmp/cf/04f2990e879f4cb6a699b9ea14d50f49 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49 2024-11-21T22:02:35,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49, entries=44, sequenceid=5, filesize=7.9 K 2024-11-21T22:02:35,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for cfabebbcb8733987892156b399f30287 in 68ms, sequenceid=5, compaction requested=false 2024-11-21T22:02:35,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for cfabebbcb8733987892156b399f30287: 2024-11-21T22:02:35,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. for snaptb-testExportWithResetTtl completed. 2024-11-21T22:02:35,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-21T22:02:35,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:35,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49] hfiles 2024-11-21T22:02:35,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49 for snapshot=snaptb-testExportWithResetTtl 2024-11-21T22:02:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741912_1088 (size=100) 2024-11-21T22:02:35,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741912_1088 (size=100) 2024-11-21T22:02:35,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:35,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741912_1088 (size=100) 2024-11-21T22:02:35,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-21T22:02:35,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-21T22:02:35,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:35,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1104283653669c50fb0250c5091209e0 in 240 msec 2024-11-21T22:02:35,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741913_1089 (size=100) 2024-11-21T22:02:35,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741913_1089 (size=100) 2024-11-21T22:02:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741913_1089 (size=100) 2024-11-21T22:02:35,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot 
operation on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:35,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-21T22:02:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-21T22:02:35,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,862 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cfabebbcb8733987892156b399f30287 2024-11-21T22:02:35,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-11-21T22:02:35,866 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:35,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cfabebbcb8733987892156b399f30287 in 255 msec 2024-11-21T22:02:35,867 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:35,868 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:35,868 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-21T22:02:35,869 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T22:02:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741914_1090 (size=600) 2024-11-21T22:02:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741914_1090 (size=600) 2024-11-21T22:02:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741914_1090 (size=600) 2024-11-21T22:02:35,884 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-11-21T22:02:35,890 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:35,890 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T22:02:35,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T22:02:35,892 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:35,892 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-21T22:02:35,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 319 msec 2024-11-21T22:02:36,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T22:02:36,202 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-21T22:02:36,215 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215 2024-11-21T22:02:36,215 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:36,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:36,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T22:02:36,269 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:02:36,275 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T22:02:36,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741915_1091 (size=143) 2024-11-21T22:02:36,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741915_1091 (size=143) 2024-11-21T22:02:36,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741915_1091 (size=143) 2024-11-21T22:02:36,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741916_1092 (size=600) 2024-11-21T22:02:36,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741916_1092 (size=600) 2024-11-21T22:02:36,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741916_1092 (size=600) 2024-11-21T22:02:36,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741917_1093 (size=141) 2024-11-21T22:02:36,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741917_1093 (size=141) 2024-11-21T22:02:36,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741917_1093 (size=141) 2024-11-21T22:02:36,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:36,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:36,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,392 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0001_000001 (auth:SIMPLE) from 127.0.0.1:50394 2024-11-21T22:02:37,400 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000001/launch_container.sh] 2024-11-21T22:02:37,400 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000001/container_tokens] 2024-11-21T22:02:37,401 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0001/container_1732226522247_0001_01_000001/sysfs] 2024-11-21T22:02:37,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-1918723423249935829.jar 2024-11-21T22:02:37,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,955 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-7866379370014401930.jar 2024-11-21T22:02:37,956 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,956 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,957 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:37,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:02:37,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:02:37,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:02:37,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:02:37,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:02:37,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:02:37,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:02:37,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:02:37,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:02:37,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:02:37,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:02:37,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:37,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:37,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:37,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:37,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:37,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:37,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:38,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741918_1094 (size=131440) 2024-11-21T22:02:38,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741918_1094 (size=131440) 2024-11-21T22:02:38,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741918_1094 (size=131440) 2024-11-21T22:02:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T22:02:38,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T22:02:38,140 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T22:02:38,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T22:02:38,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T22:02:38,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T22:02:38,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741921_1097 (size=903739) 2024-11-21T22:02:38,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741921_1097 (size=903739) 2024-11-21T22:02:38,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741921_1097 (size=903739) 2024-11-21T22:02:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T22:02:38,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T22:02:38,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T22:02:38,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741923_1099 (size=6424742) 2024-11-21T22:02:38,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741923_1099 (size=6424742) 2024-11-21T22:02:38,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741923_1099 (size=6424742) 2024-11-21T22:02:38,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741924_1100 (size=440656) 2024-11-21T22:02:38,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741924_1100 (size=440656) 2024-11-21T22:02:38,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741924_1100 (size=440656) 2024-11-21T22:02:38,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741925_1101 (size=1877034) 2024-11-21T22:02:38,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741925_1101 (size=1877034) 2024-11-21T22:02:38,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741925_1101 (size=1877034) 2024-11-21T22:02:38,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741926_1102 (size=77835) 2024-11-21T22:02:38,335 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741926_1102 (size=77835) 2024-11-21T22:02:38,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741926_1102 (size=77835) 2024-11-21T22:02:38,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741927_1103 (size=30949) 2024-11-21T22:02:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741927_1103 (size=30949) 2024-11-21T22:02:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741927_1103 (size=30949) 2024-11-21T22:02:38,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741928_1104 (size=1597347) 2024-11-21T22:02:38,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741928_1104 (size=1597347) 2024-11-21T22:02:38,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741928_1104 (size=1597347) 2024-11-21T22:02:38,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741929_1105 (size=4695811) 2024-11-21T22:02:38,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741929_1105 (size=4695811) 2024-11-21T22:02:38,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741929_1105 (size=4695811) 2024-11-21T22:02:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741930_1106 (size=232957) 2024-11-21T22:02:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741930_1106 (size=232957) 2024-11-21T22:02:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741930_1106 (size=232957) 2024-11-21T22:02:38,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741931_1107 (size=127628) 2024-11-21T22:02:38,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741931_1107 (size=127628) 2024-11-21T22:02:38,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741931_1107 (size=127628) 2024-11-21T22:02:38,663 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:02:38,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741932_1108 (size=20406) 2024-11-21T22:02:38,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741932_1108 (size=20406) 2024-11-21T22:02:38,937 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741932_1108 (size=20406) 2024-11-21T22:02:38,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741933_1109 (size=5175431) 2024-11-21T22:02:38,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741933_1109 (size=5175431) 2024-11-21T22:02:38,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741933_1109 (size=5175431) 2024-11-21T22:02:39,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741934_1110 (size=217634) 2024-11-21T22:02:39,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741934_1110 (size=217634) 2024-11-21T22:02:39,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741934_1110 (size=217634) 2024-11-21T22:02:39,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741935_1111 (size=1832290) 2024-11-21T22:02:39,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741935_1111 (size=1832290) 2024-11-21T22:02:39,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741935_1111 (size=1832290) 2024-11-21T22:02:39,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741936_1112 (size=322274) 2024-11-21T22:02:39,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741936_1112 (size=322274) 2024-11-21T22:02:39,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741936_1112 (size=322274) 2024-11-21T22:02:39,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741937_1113 (size=503880) 2024-11-21T22:02:39,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741937_1113 (size=503880) 2024-11-21T22:02:39,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741937_1113 (size=503880) 2024-11-21T22:02:39,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741938_1114 (size=29229) 2024-11-21T22:02:39,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741938_1114 (size=29229) 2024-11-21T22:02:39,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741938_1114 (size=29229) 2024-11-21T22:02:40,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741939_1115 (size=24096) 
2024-11-21T22:02:40,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741939_1115 (size=24096) 2024-11-21T22:02:40,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741939_1115 (size=24096) 2024-11-21T22:02:40,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741940_1116 (size=111872) 2024-11-21T22:02:40,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741940_1116 (size=111872) 2024-11-21T22:02:40,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741940_1116 (size=111872) 2024-11-21T22:02:40,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741941_1117 (size=45609) 2024-11-21T22:02:40,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741941_1117 (size=45609) 2024-11-21T22:02:40,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741941_1117 (size=45609) 2024-11-21T22:02:40,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741942_1118 (size=136454) 2024-11-21T22:02:40,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741942_1118 (size=136454) 2024-11-21T22:02:40,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741942_1118 (size=136454) 2024-11-21T22:02:40,403 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
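The TableMapReduceUtil(972) entries above record dependency-jar resolution for the MapReduce job backing the snapshot export: for each class the job needs, the utility locates (or packages) the jar that provides it and adds it to the job classpath. The sketch below is a hedged illustration of how that resolution is typically triggered from client code; the job name and configuration here are assumptions, not values taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Job name is illustrative only (assumption).
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Resolves the jar providing each class the job depends on and adds it to
    // the job's distributed classpath; this is the step that emits the
    // "For class X, using jar Y" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}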
2024-11-21T22:02:40,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-21T22:02:40,412 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:02:40,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741943_1119 (size=324) 2024-11-21T22:02:40,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741943_1119 (size=324) 2024-11-21T22:02:40,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741943_1119 (size=324) 2024-11-21T22:02:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741944_1120 (size=15) 2024-11-21T22:02:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741944_1120 (size=15) 2024-11-21T22:02:40,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741944_1120 (size=15) 2024-11-21T22:02:40,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741945_1121 (size=303726) 2024-11-21T22:02:40,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741945_1121 (size=303726) 2024-11-21T22:02:40,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741945_1121 (size=303726) 2024-11-21T22:02:40,563 INFO [master/2a884b411602:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T22:02:40,563 INFO [master/2a884b411602:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-21T22:02:40,576 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:02:40,577 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:02:41,111 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0002_000001 (auth:SIMPLE) from 127.0.0.1:48472 2024-11-21T22:02:44,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-21T22:02:44,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-21T22:02:47,367 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0002_000001 (auth:SIMPLE) from 127.0.0.1:57140 2024-11-21T22:02:47,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741946_1122 (size=349376) 2024-11-21T22:02:47,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741946_1122 (size=349376) 2024-11-21T22:02:47,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741946_1122 (size=349376) 2024-11-21T22:02:49,626 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0002_000001 (auth:SIMPLE) from 127.0.0.1:42698 2024-11-21T22:02:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741947_1123 (size=8122) 2024-11-21T22:02:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741947_1123 (size=8122) 2024-11-21T22:02:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741947_1123 (size=8122) 2024-11-21T22:02:53,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T22:02:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741948_1124 (size=5490) 2024-11-21T22:02:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741948_1124 (size=5490) 2024-11-21T22:02:53,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741948_1124 (size=5490) 2024-11-21T22:02:53,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741949_1125 (size=17398) 2024-11-21T22:02:53,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741949_1125 (size=17398) 2024-11-21T22:02:53,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741949_1125 (size=17398) 2024-11-21T22:02:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741950_1126 (size=461) 2024-11-21T22:02:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741950_1126 (size=461) 2024-11-21T22:02:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741950_1126 (size=461) 2024-11-21T22:02:53,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741951_1127 (size=17398) 2024-11-21T22:02:53,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741951_1127 (size=17398) 2024-11-21T22:02:53,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741951_1127 (size=17398) 2024-11-21T22:02:53,186 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000002/launch_container.sh] 2024-11-21T22:02:53,186 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000002/container_tokens] 2024-11-21T22:02:53,186 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000002/sysfs] 2024-11-21T22:02:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741952_1128 
(size=349376) 2024-11-21T22:02:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741952_1128 (size=349376) 2024-11-21T22:02:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741952_1128 (size=349376) 2024-11-21T22:02:53,221 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0002_000001 (auth:SIMPLE) from 127.0.0.1:60894 2024-11-21T22:02:54,811 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:02:54,811 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:02:54,817 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-21T22:02:54,817 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:02:54,817 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:02:54,817 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T22:02:54,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-21T22:02:54,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-21T22:02:54,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T22:02:54,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-21T22:02:54,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226556215/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-21T22:02:54,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-21T22:02:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 
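The ExportSnapshot entries above (loading the snapshot hfile list, computing export splits, running the copy job, then finalizing and verifying the export) correspond to the standard snapshot-export tool. A minimal sketch of driving it programmatically follows; the snapshot name matches the log, while the destination URI and configuration are assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the same phases logged above: load the snapshot's hfile list,
    // split the copy work across mappers, then finalize and verify the export.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://namenode:8020/backups"  // assumed destination
    });
    System.exit(rc);
  }
}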
2024-11-21T22:02:54,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T22:02:54,830 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226574830"}]},"ts":"1732226574830"} 2024-11-21T22:02:54,832 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-21T22:02:54,832 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-21T22:02:54,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-21T22:02:54,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, UNASSIGN}] 2024-11-21T22:02:54,835 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, UNASSIGN 2024-11-21T22:02:54,835 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, UNASSIGN 2024-11-21T22:02:54,836 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=cfabebbcb8733987892156b399f30287, regionState=CLOSING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:02:54,836 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=1104283653669c50fb0250c5091209e0, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:54,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, UNASSIGN because future has completed 2024-11-21T22:02:54,838 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:54,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:02:54,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, UNASSIGN because future has completed 
2024-11-21T22:02:54,839 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:54,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:54,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T22:02:54,992 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close cfabebbcb8733987892156b399f30287 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:54,992 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing cfabebbcb8733987892156b399f30287, disabling compactions & flushes 2024-11-21T22:02:54,992 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. after waiting 0 ms 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 1104283653669c50fb0250c5091209e0, disabling compactions & flushes 2024-11-21T22:02:54,992 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:54,992 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:54,993 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:54,993 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 
after waiting 0 ms 2024-11-21T22:02:54,993 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 2024-11-21T22:02:54,997 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:02:54,997 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:02:54,998 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:54,998 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:54,998 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287. 2024-11-21T22:02:54,998 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0. 
2024-11-21T22:02:54,998 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 1104283653669c50fb0250c5091209e0: Waiting for close lock at 1732226574992Running coprocessor pre-close hooks at 1732226574992Disabling compacts and flushes for region at 1732226574992Disabling writes for close at 1732226574993 (+1 ms)Writing region close event to WAL at 1732226574993Running coprocessor post-close hooks at 1732226574998 (+5 ms)Closed at 1732226574998 2024-11-21T22:02:54,998 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for cfabebbcb8733987892156b399f30287: Waiting for close lock at 1732226574992Running coprocessor pre-close hooks at 1732226574992Disabling compacts and flushes for region at 1732226574992Disabling writes for close at 1732226574992Writing region close event to WAL at 1732226574993 (+1 ms)Running coprocessor post-close hooks at 1732226574998 (+5 ms)Closed at 1732226574998 2024-11-21T22:02:55,000 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed cfabebbcb8733987892156b399f30287 2024-11-21T22:02:55,001 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=cfabebbcb8733987892156b399f30287, regionState=CLOSED 2024-11-21T22:02:55,001 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 1104283653669c50fb0250c5091209e0 2024-11-21T22:02:55,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=1104283653669c50fb0250c5091209e0, regionState=CLOSED 2024-11-21T22:02:55,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:02:55,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:55,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-21T22:02:55,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure cfabebbcb8733987892156b399f30287, server=2a884b411602,36419,1732226515183 in 165 msec 2024-11-21T22:02:55,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=cfabebbcb8733987892156b399f30287, UNASSIGN in 172 msec 2024-11-21T22:02:55,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-21T22:02:55,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 1104283653669c50fb0250c5091209e0, server=2a884b411602,44629,1732226515126 in 165 msec 2024-11-21T22:02:55,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, 
resume processing ppid=45 2024-11-21T22:02:55,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1104283653669c50fb0250c5091209e0, UNASSIGN in 173 msec 2024-11-21T22:02:55,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-21T22:02:55,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 177 msec 2024-11-21T22:02:55,012 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226575012"}]},"ts":"1732226575012"} 2024-11-21T22:02:55,014 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-21T22:02:55,014 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-21T22:02:55,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 188 msec 2024-11-21T22:02:55,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T22:02:55,151 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-21T22:02:55,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-21T22:02:55,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:55,154 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-21T22:02:55,155 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:55,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-21T22:02:55,161 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0 2024-11-21T22:02:55,161 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287 2024-11-21T22:02:55,163 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/recovered.edits] 2024-11-21T22:02:55,163 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/recovered.edits] 2024-11-21T22:02:55,166 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/cf/04f2990e879f4cb6a699b9ea14d50f49 2024-11-21T22:02:55,166 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/cf/35ed4b0104e84a67847451ef21c03e04 2024-11-21T22:02:55,170 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/recovered.edits/8.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287/recovered.edits/8.seqid 2024-11-21T22:02:55,170 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/recovered.edits/8.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0/recovered.edits/8.seqid 2024-11-21T22:02:55,171 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/1104283653669c50fb0250c5091209e0 2024-11-21T22:02:55,171 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportWithResetTtl/cfabebbcb8733987892156b399f30287 2024-11-21T22:02:55,171 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-21T22:02:55,173 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 
2024-11-21T22:02:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,176 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,177 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,177 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,177 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-21T22:02:55,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,178 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,180 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-21T22:02:55,181 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:55,182 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-11-21T22:02:55,182 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226575182"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,182 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226575182"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,185 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T22:02:55,185 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,185 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:02:55,185 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1104283653669c50fb0250c5091209e0, NAME => 'testExportWithResetTtl,,1732226554853.1104283653669c50fb0250c5091209e0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => cfabebbcb8733987892156b399f30287, NAME => 'testExportWithResetTtl,1,1732226554853.cfabebbcb8733987892156b399f30287.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:02:55,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-21T22:02:55,185 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
2024-11-21T22:02:55,186 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226575185"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:55,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:55,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:55,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:55,189 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-21T22:02:55,190 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T22:02:55,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 38 msec 2024-11-21T22:02:55,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-21T22:02:55,292 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-21T22:02:55,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-21T22:02:55,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-21T22:02:55,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-21T22:02:55,296 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226575296"}]},"ts":"1732226575296"} 2024-11-21T22:02:55,298 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-21T22:02:55,298 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithResetTtl to state=DISABLING 2024-11-21T22:02:55,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-21T22:02:55,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, UNASSIGN}] 2024-11-21T22:02:55,304 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, UNASSIGN 2024-11-21T22:02:55,305 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, UNASSIGN 2024-11-21T22:02:55,307 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=63ab513b366f9d889c2e0bf42478ccae, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:55,307 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=ccc8827a3646c7a343b322db3b424528, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:55,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, UNASSIGN because future has completed 2024-11-21T22:02:55,311 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:55,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:02:55,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, UNASSIGN because future has completed 2024-11-21T22:02:55,315 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:02:55,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:55,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 
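The procedure tree above (DisableTableProcedure pid=51 spawning CloseTableRegionsProcedure pid=52, which spawns one TransitRegionStateProcedure UNASSIGN per region) is what a single client-side disableTable call fans out into; the repeated "Checking to see if procedure is done pid=51" lines are the master answering the client's completion polls. A hedged sketch of that client side, using the standard HBase 2.x Admin API; connection configuration is a placeholder, only the table name comes from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Sketch: the client-side call that triggers the DisableTableProcedure seen above. */
public class DisableTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            if (!admin.isTableDisabled(table)) {
                // Blocks until the master's DisableTableProcedure (and its
                // close/unassign subprocedures) completes.
                admin.disableTable(table);
            }
            System.out.println("disabled=" + admin.isTableDisabled(table));
        }
    }
}
```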
2024-11-21T22:02:55,467 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:55,468 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:55,468 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing ccc8827a3646c7a343b322db3b424528, disabling compactions & flushes 2024-11-21T22:02:55,468 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:55,468 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:55,468 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. after waiting 0 ms 2024-11-21T22:02:55,468 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:55,471 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:55,471 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:02:55,471 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing 63ab513b366f9d889c2e0bf42478ccae, disabling compactions & flushes 2024-11-21T22:02:55,471 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:55,471 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:55,471 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. after waiting 0 ms 2024-11-21T22:02:55,471 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 
2024-11-21T22:02:55,476 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:02:55,476 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:02:55,477 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:55,477 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:02:55,477 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae. 2024-11-21T22:02:55,477 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528. 2024-11-21T22:02:55,477 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for 63ab513b366f9d889c2e0bf42478ccae: Waiting for close lock at 1732226575471Running coprocessor pre-close hooks at 1732226575471Disabling compacts and flushes for region at 1732226575471Disabling writes for close at 1732226575471Writing region close event to WAL at 1732226575474 (+3 ms)Running coprocessor post-close hooks at 1732226575477 (+3 ms)Closed at 1732226575477 2024-11-21T22:02:55,477 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for ccc8827a3646c7a343b322db3b424528: Waiting for close lock at 1732226575468Running coprocessor pre-close hooks at 1732226575468Disabling compacts and flushes for region at 1732226575468Disabling writes for close at 1732226575468Writing region close event to WAL at 1732226575471 (+3 ms)Running coprocessor post-close hooks at 1732226575477 (+6 ms)Closed at 1732226575477 2024-11-21T22:02:55,480 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:55,480 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=ccc8827a3646c7a343b322db3b424528, regionState=CLOSED 2024-11-21T22:02:55,482 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed 63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:55,484 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=63ab513b366f9d889c2e0bf42478ccae, regionState=CLOSED 2024-11-21T22:02:55,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:02:55,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:55,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=54 2024-11-21T22:02:55,494 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure ccc8827a3646c7a343b322db3b424528, server=2a884b411602,41399,1732226514979 in 174 msec 2024-11-21T22:02:55,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=53 2024-11-21T22:02:55,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure 63ab513b366f9d889c2e0bf42478ccae, server=2a884b411602,44629,1732226515126 in 178 msec 2024-11-21T22:02:55,496 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ccc8827a3646c7a343b322db3b424528, UNASSIGN in 191 msec 2024-11-21T22:02:55,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-21T22:02:55,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=63ab513b366f9d889c2e0bf42478ccae, UNASSIGN in 193 msec 2024-11-21T22:02:55,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-21T22:02:55,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 198 msec 2024-11-21T22:02:55,501 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226575501"}]},"ts":"1732226575501"} 2024-11-21T22:02:55,504 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-21T22:02:55,504 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-21T22:02:55,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 213 msec 2024-11-21T22:02:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-21T22:02:55,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T22:02:55,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete 
testtb-testExportWithResetTtl 2024-11-21T22:02:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,616 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-21T22:02:55,617 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,622 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-21T22:02:55,622 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:55,622 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:55,624 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/recovered.edits] 2024-11-21T22:02:55,624 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/recovered.edits] 2024-11-21T22:02:55,627 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/cf/58612bd412c449c1af54c3e77aa8d8e3 2024-11-21T22:02:55,627 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19 to 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/cf/26c416e42d324faca9f7f6354bc83a19 2024-11-21T22:02:55,630 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae/recovered.edits/9.seqid 2024-11-21T22:02:55,630 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528/recovered.edits/9.seqid 2024-11-21T22:02:55,631 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/ccc8827a3646c7a343b322db3b424528 2024-11-21T22:02:55,631 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithResetTtl/63ab513b366f9d889c2e0bf42478ccae 2024-11-21T22:02:55,631 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-21T22:02:55,634 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,644 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-21T22:02:55,647 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
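The HFileArchiver lines above show the server-side effect of deleting the now-disabled table: store files and recovered.edits are moved under the cluster's archive/ directory rather than removed outright, the region directories are then deleted, and the remaining rows are purged from hbase:meta before the descriptor is dropped. The client call that kicks this off is a plain deleteTable; a minimal sketch under the same assumptions as the previous snippet follows.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Sketch: delete a disabled table; the master archives its HFiles as in the log above. */
public class DeleteTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            if (!admin.isTableDisabled(table)) {
                admin.disableTable(table);            // deleteTable requires a disabled table
            }
            // Runs the DeleteTableProcedure: clear the FS layout (via HFileArchiver),
            // remove the regions and table state from hbase:meta, drop the descriptor.
            admin.deleteTable(table);
        }
    }
}
```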
2024-11-21T22:02:55,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,647 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,649 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,649 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-21T22:02:55,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T22:02:55,649 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226575649"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,649 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226575649"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:02:55,652 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 63ab513b366f9d889c2e0bf42478ccae, NAME => 'testtb-testExportWithResetTtl,,1732226553476.63ab513b366f9d889c2e0bf42478ccae.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ccc8827a3646c7a343b322db3b424528, NAME => 'testtb-testExportWithResetTtl,1,1732226553476.ccc8827a3646c7a343b322db3b424528.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:02:55,652 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
2024-11-21T22:02:55,652 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226575652"}]},"ts":"9223372036854775807"} 2024-11-21T22:02:55,657 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-21T22:02:55,659 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T22:02:55,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,660 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T22:02:55,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,660 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-21T22:02:55,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 46 msec 2024-11-21T22:02:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-21T22:02:55,771 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-21T22:02:55,771 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T22:02:55,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-21T22:02:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-21T22:02:55,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-21T22:02:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-21T22:02:55,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-21T22:02:55,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-21T22:02:55,823 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=782 (was 772) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1894588569_1 at /127.0.0.1:58070 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:34503 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:47416 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 115373) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45437 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37543 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:45437 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:58092 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:39704 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1894588569_1 at /127.0.0.1:39670 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2070 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=807 (was 806) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=558 (was 541) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 19), AvailableMemoryMB=703 (was 1236) 2024-11-21T22:02:55,823 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=782 is superior to 500 2024-11-21T22:02:55,846 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=782, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=558, ProcessCount=19, AvailableMemoryMB=702 2024-11-21T22:02:55,846 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=782 is superior to 500 2024-11-21T22:02:55,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:02:55,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:02:55,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:02:55,856 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:55,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-21T22:02:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T22:02:55,858 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:02:55,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741953_1129 (size=407) 2024-11-21T22:02:55,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741953_1129 (size=407) 2024-11-21T22:02:55,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741953_1129 (size=407) 2024-11-21T22:02:55,918 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7d1d8d00636267055df78265b721672e, NAME => 'testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:55,920 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0509275dd453075d266faf565c5c5c3f, NAME => 'testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:55,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T22:02:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741954_1130 (size=68) 2024-11-21T22:02:55,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741954_1130 (size=68) 2024-11-21T22:02:55,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741955_1131 (size=68) 2024-11-21T22:02:55,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741954_1130 (size=68) 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 7d1d8d00636267055df78265b721672e, disabling compactions & flushes 2024-11-21T22:02:55,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. after waiting 0 ms 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 
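The CreateTableProcedure above logs the full descriptor for 'testtb-testExportFileSystemState': one 'cf' family with VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB, REGION_REPLICATION=1 and the default store file tracker, split into two regions at key '1'. Built programmatically with the HBase 2.x descriptor builders, the equivalent client call would look roughly like the sketch below; only the table name, family name, the attributes printed in the log, and the split key implied by the region boundaries are taken from it, the rest is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: build the descriptor logged above and create the two-region table. */
public class CreateExportTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");

        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                     // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                               // BLOCKSIZE => 64KB
                .build())
            .build();

        // Split at "1" yields the two regions seen in the log: ('', '1') and ('1', '').
        byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.createTable(desc, splitKeys);   // blocks until CreateTableProcedure finishes
        }
    }
}
```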
2024-11-21T22:02:55,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:55,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7d1d8d00636267055df78265b721672e: Waiting for close lock at 1732226575989Disabling compacts and flushes for region at 1732226575989Disabling writes for close at 1732226575989Writing region close event to WAL at 1732226575989Closed at 1732226575989 2024-11-21T22:02:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741955_1131 (size=68) 2024-11-21T22:02:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741955_1131 (size=68) 2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 0509275dd453075d266faf565c5c5c3f, disabling compactions & flushes 2024-11-21T22:02:55,993 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. after waiting 0 ms 2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:55,993 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 
2024-11-21T22:02:55,993 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0509275dd453075d266faf565c5c5c3f: Waiting for close lock at 1732226575993Disabling compacts and flushes for region at 1732226575993Disabling writes for close at 1732226575993Writing region close event to WAL at 1732226575993Closed at 1732226575993 2024-11-21T22:02:55,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:02:55,995 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732226575995"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226575995"}]},"ts":"1732226575995"} 2024-11-21T22:02:55,995 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732226575995"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226575995"}]},"ts":"1732226575995"} 2024-11-21T22:02:55,999 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:02:56,000 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:02:56,001 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226576000"}]},"ts":"1732226576000"} 2024-11-21T22:02:56,003 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-21T22:02:56,004 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:02:56,006 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:02:56,007 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:02:56,007 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:02:56,007 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:02:56,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:02:56,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, ASSIGN}] 2024-11-21T22:02:56,010 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, ASSIGN 2024-11-21T22:02:56,010 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, ASSIGN 2024-11-21T22:02:56,016 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:02:56,017 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:02:56,167 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-21T22:02:56,167 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=0509275dd453075d266faf565c5c5c3f, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:56,167 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=7d1d8d00636267055df78265b721672e, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:56,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, ASSIGN because future has completed 2024-11-21T22:02:56,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:02:56,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, ASSIGN because future has completed 2024-11-21T22:02:56,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:02:56,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T22:02:56,324 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 7d1d8d00636267055df78265b721672e, NAME => 'testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. service=AccessControlService 2024-11-21T22:02:56,325 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 0509275dd453075d266faf565c5c5c3f, NAME => 'testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:02:56,325 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
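[Editor's note] The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries above reflect the secure test cluster having the AccessController coprocessor wired in. As a rough illustration only (the mini-cluster configures this through its own harness), the standard configuration keys that enable it look like the following when set programmatically:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlConfigSketch {
  // Illustrative only: these are the documented HBase keys for enabling the
  // AccessController coprocessor; the test harness sets them up internally.
  static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}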
2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. service=AccessControlService 2024-11-21T22:02:56,325 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,326 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,326 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,327 INFO [StoreOpener-7d1d8d00636267055df78265b721672e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,327 INFO [StoreOpener-0509275dd453075d266faf565c5c5c3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,329 INFO [StoreOpener-7d1d8d00636267055df78265b721672e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d1d8d00636267055df78265b721672e columnFamilyName cf 2024-11-21T22:02:56,329 INFO [StoreOpener-0509275dd453075d266faf565c5c5c3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0509275dd453075d266faf565c5c5c3f columnFamilyName cf 2024-11-21T22:02:56,329 DEBUG [StoreOpener-7d1d8d00636267055df78265b721672e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:56,329 DEBUG [StoreOpener-0509275dd453075d266faf565c5c5c3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:02:56,329 INFO [StoreOpener-7d1d8d00636267055df78265b721672e-1 {}] regionserver.HStore(327): Store=7d1d8d00636267055df78265b721672e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:56,329 INFO [StoreOpener-0509275dd453075d266faf565c5c5c3f-1 {}] regionserver.HStore(327): Store=0509275dd453075d266faf565c5c5c3f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:02:56,329 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,329 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,330 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,330 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,330 DEBUG 
[RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,331 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,331 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,331 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,331 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,331 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,332 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,333 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,334 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:56,334 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:02:56,335 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 0509275dd453075d266faf565c5c5c3f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65432829, jitterRate=-0.024974867701530457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:56,335 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,335 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 7d1d8d00636267055df78265b721672e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62664751, jitterRate=-0.06622244417667389}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:02:56,335 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,335 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 0509275dd453075d266faf565c5c5c3f: Running coprocessor pre-open hook at 1732226576326Writing region info on filesystem at 1732226576326Initializing all the Stores at 1732226576327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226576327Cleaning up temporary data from old regions at 1732226576331 (+4 ms)Running coprocessor post-open hooks at 1732226576335 (+4 ms)Region opened successfully at 1732226576335 2024-11-21T22:02:56,335 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 7d1d8d00636267055df78265b721672e: Running coprocessor pre-open hook at 1732226576326Writing region info on filesystem at 1732226576326Initializing all the Stores at 1732226576327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226576327Cleaning up temporary data from old regions at 1732226576331 (+4 ms)Running coprocessor post-open hooks at 1732226576335 (+4 ms)Region opened successfully at 1732226576335 2024-11-21T22:02:56,336 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., pid=62, masterSystemTime=1732226576322 2024-11-21T22:02:56,336 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e., pid=61, masterSystemTime=1732226576321 2024-11-21T22:02:56,338 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:56,338 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 
2024-11-21T22:02:56,339 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=7d1d8d00636267055df78265b721672e, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:02:56,339 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:56,339 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:56,340 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=0509275dd453075d266faf565c5c5c3f, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:02:56,340 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=2a884b411602,44629,1732226515126, table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T22:02:56,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:02:56,341 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=2a884b411602,41399,1732226514979, table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-21T22:02:56,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:02:56,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-21T22:02:56,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126 in 173 msec 2024-11-21T22:02:56,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-21T22:02:56,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979 in 173 msec 2024-11-21T22:02:56,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, ASSIGN in 337 msec 2024-11-21T22:02:56,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-21T22:02:56,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, ASSIGN in 338 msec 2024-11-21T22:02:56,347 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:02:56,347 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226576347"}]},"ts":"1732226576347"} 2024-11-21T22:02:56,349 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-21T22:02:56,350 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:02:56,350 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-21T22:02:56,353 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:02:56,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:56,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:56,401 DEBUG 
[pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:56,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:02:56,413 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:56,413 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:56,413 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:56,413 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:02:56,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 565 msec 2024-11-21T22:02:56,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T22:02:56,492 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T22:02:56,492 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-21T22:02:56,492 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:56,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-21T22:02:56,496 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:56,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
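[Editor's note] The HBaseTestingUtil wait above (poll until every region of the new table is assigned, 60,000 ms timeout) is a simple polling pattern. A hypothetical client-side equivalent using Admin.isTableAvailable is sketched below; the helper name and sleep interval are made up for illustration and are not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
  // Poll until the table reports available or the timeout expires.
  static void waitForTable(Admin admin, TableName table, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Timed out waiting for regions of " + table);
      }
      Thread.sleep(200);
    }
  }
}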
2024-11-21T22:02:56,496 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:02:56,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226576500 (current time:1732226576500). 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d5c69de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:56,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:56,501 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28b44fe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:56,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,503 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:37582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:56,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10905051, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:56,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:56,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:56,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,508 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:02:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d46ff7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:56,510 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:56,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:56,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:56,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@600da2f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:56,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:56,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,512 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37600, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:56,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c0a985b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:56,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:56,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
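[Editor's note] The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }) is what a client issues through Admin.snapshot. A minimal sketch, assuming an already-open Connection as in the earlier example:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeSnapshotSketch {
  // Request a flush-type snapshot of the test table; the master then drives the
  // SnapshotProcedure states seen below in the log (SNAPSHOT_PREPARE,
  // SNAPSHOT_WRITE_SNAPSHOT_INFO, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, ...).
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
  }
}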
2024-11-21T22:02:56,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:56,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,519 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:56,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:02:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,521 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:02:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:02:56,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:02:56,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-21T22:02:56,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T22:02:56,525 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:56,527 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:56,531 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:56,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741956_1132 (size=170) 2024-11-21T22:02:56,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741956_1132 (size=170) 2024-11-21T22:02:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741956_1132 (size=170) 2024-11-21T22:02:56,547 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:56,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f}] 2024-11-21T22:02:56,550 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,550 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T22:02:56,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-21T22:02:56,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for 7d1d8d00636267055df78265b721672e: 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 0509275dd453075d266faf565c5c5c3f: 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. for emptySnaptb0-testExportFileSystemState completed. 2024-11-21T22:02:56,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. for emptySnaptb0-testExportFileSystemState completed. 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:56,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:02:56,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741957_1133 (size=71) 2024-11-21T22:02:56,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741957_1133 (size=71) 2024-11-21T22:02:56,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741957_1133 (size=71) 2024-11-21T22:02:56,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:56,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741958_1134 (size=71) 2024-11-21T22:02:56,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-21T22:02:56,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741958_1134 (size=71) 2024-11-21T22:02:56,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741958_1134 (size=71) 2024-11-21T22:02:56,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 
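[Editor's note] Since this test (snapshot.TestSecureExportSnapshot#testExportFileSystemState) ultimately exports the snapshot to another filesystem, a rough sketch of driving the ExportSnapshot tool in-process is included here. The destination URI is a placeholder, the flag names follow the tool's documented usage (-snapshot, -copy-to), and this is not how the test itself invokes the export.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent CLI: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot emptySnaptb0-testExportFileSystemState -copy-to hdfs://dest-cluster/hbase
    Configuration conf = HBaseConfiguration.create();
    // hdfs://dest-cluster/hbase is a placeholder, not a path from this test run.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://dest-cluster/hbase"
    });
    System.exit(rc);
  }
}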
2024-11-21T22:02:56,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-21T22:02:56,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-21T22:02:56,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,720 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-21T22:02:56,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,732 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:56,737 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f in 176 msec 2024-11-21T22:02:56,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-11-21T22:02:56,739 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:56,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e in 189 msec 2024-11-21T22:02:56,741 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:56,742 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:56,742 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-21T22:02:56,743 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-21T22:02:56,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36581 is added to blk_1073741959_1135 (size=552) 2024-11-21T22:02:56,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741959_1135 (size=552) 2024-11-21T22:02:56,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741959_1135 (size=552) 2024-11-21T22:02:56,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:56,781 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:56,782 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-21T22:02:56,784 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:56,784 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-21T22:02:56,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 262 msec 2024-11-21T22:02:56,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T22:02:56,841 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T22:02:56,847 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='083545fcf59beb5a51957823a7162d917', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:02:56,849 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='11528387bb4e96b6c784245ca001b33c1', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., 
hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:56,849 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='2113b335dc2cd67f50037a808f0c0c46e', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:56,850 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='320d819a2e077d4e0bfee668fd7d44697', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:56,852 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='57a944f3e4bed813794863e69384bb49f', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:56,853 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='4dbb628a8658628594460d20bc573f374', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:02:56,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:56,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:02:56,861 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:02:56,865 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-21T22:02:56,865 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 
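The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries just above come from the test loading rows with per-mutation write-ahead logging turned off. A minimal client-side sketch of that kind of load follows; the table name and the cf:q family/qualifier are taken from the log, while the row key and value are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Row key and value are placeholders; family/qualifier "cf:q" match the log above.
      Put put = new Put(Bytes.toBytes("row-0001"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0001"));
      // Skipping the WAL is what produces the "writing data ... with WAL disabled" warning:
      // the edit only lives in the memstore until a flush, so a crash can lose it.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

SKIP_WAL trades durability for write throughput, which is why the region server logs the data-loss warning each time.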
2024-11-21T22:02:56,865 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:02:56,868 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:02:56,875 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:02:56,884 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:02:56,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:02:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226576888 (current time:1732226576888). 2024-11-21T22:02:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:02:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-21T22:02:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:02:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63372685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:56,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:56,890 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:56,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:56,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:56,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@211707a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-21T22:02:56,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:56,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:56,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,892 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37630, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:56,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bc40ac8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:56,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:56,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:56,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,897 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35bdd0fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:02:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:02:56,899 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:02:56,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:02:56,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:02:56,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@697c8f06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:02:56,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:02:56,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,901 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:02:56,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73ab81f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:02:56,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:02:56,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:02:56,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60778, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:56,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:02:56,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:02:56,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:02:56,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:02:56,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:02:56,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:02:56,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:02:56,912 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:02:56,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
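The snapshot request handled above ({ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }), followed by the master storing a SnapshotProcedure, is what an ordinary client-side Admin.snapshot call triggers. A hedged sketch of that client side; the snapshot and table names are taken from the log, everything else (configuration, connection setup) is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes an online (FLUSH-type) snapshot, i.e. the
      // { ss=snaptb0-... type=FLUSH ttl=0 } request seen in MasterRpcServices above.
      // The call blocks until the master-side SnapshotProcedure completes.
      admin.snapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}

The repeated "Checking to see if procedure is done pid=66" entries that follow are the client polling the master for completion of that procedure.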
2024-11-21T22:02:56,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:02:56,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-21T22:02:56,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T22:02:56,914 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:02:56,916 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:02:56,919 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:02:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741960_1136 (size=165) 2024-11-21T22:02:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741960_1136 (size=165) 2024-11-21T22:02:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741960_1136 (size=165) 2024-11-21T22:02:56,933 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:02:56,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f}] 2024-11-21T22:02:56,934 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:56,934 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:57,021 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T22:02:57,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-21T22:02:57,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:02:57,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-21T22:02:57,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:02:57,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 0509275dd453075d266faf565c5c5c3f 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-21T22:02:57,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing 7d1d8d00636267055df78265b721672e 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-21T22:02:57,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/.tmp/cf/f33cf7deea9a431182cf45563c5ff59b is 71, key is 02c797c9ad8ce09244f14af248ea3bad/cf:q/1732226576856/Put/seqid=0 2024-11-21T22:02:57,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/.tmp/cf/2fcdb666e4e34015a31bc0f28f76bc8d is 71, key is 115f6a01f6ca49aa3f0ad9b49da1f5e4/cf:q/1732226576859/Put/seqid=0 2024-11-21T22:02:57,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741961_1137 (size=5422) 2024-11-21T22:02:57,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741961_1137 (size=5422) 2024-11-21T22:02:57,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741961_1137 (size=5422) 2024-11-21T22:02:57,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/.tmp/cf/f33cf7deea9a431182cf45563c5ff59b 
2024-11-21T22:02:57,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741962_1138 (size=8190) 2024-11-21T22:02:57,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741962_1138 (size=8190) 2024-11-21T22:02:57,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741962_1138 (size=8190) 2024-11-21T22:02:57,125 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/.tmp/cf/2fcdb666e4e34015a31bc0f28f76bc8d 2024-11-21T22:02:57,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/.tmp/cf/f33cf7deea9a431182cf45563c5ff59b as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b 2024-11-21T22:02:57,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/.tmp/cf/2fcdb666e4e34015a31bc0f28f76bc8d as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d 2024-11-21T22:02:57,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b, entries=5, sequenceid=6, filesize=5.3 K 2024-11-21T22:02:57,149 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 7d1d8d00636267055df78265b721672e in 62ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:57,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for 7d1d8d00636267055df78265b721672e: 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. for snaptb0-testExportFileSystemState completed. 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b] hfiles 2024-11-21T22:02:57,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b for snapshot=snaptb0-testExportFileSystemState 2024-11-21T22:02:57,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d, entries=45, sequenceid=6, filesize=8.0 K 2024-11-21T22:02:57,155 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 0509275dd453075d266faf565c5c5c3f in 67ms, sequenceid=6, compaction requested=false 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 0509275dd453075d266faf565c5c5c3f: 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. for snaptb0-testExportFileSystemState completed. 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d] hfiles 2024-11-21T22:02:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d for snapshot=snaptb0-testExportFileSystemState 2024-11-21T22:02:57,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741964_1140 (size=110) 2024-11-21T22:02:57,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741964_1140 (size=110) 2024-11-21T22:02:57,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741964_1140 (size=110) 2024-11-21T22:02:57,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 
2024-11-21T22:02:57,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-21T22:02:57,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-21T22:02:57,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:57,173 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:02:57,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0509275dd453075d266faf565c5c5c3f in 241 msec 2024-11-21T22:02:57,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741963_1139 (size=110) 2024-11-21T22:02:57,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741963_1139 (size=110) 2024-11-21T22:02:57,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741963_1139 (size=110) 2024-11-21T22:02:57,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 
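A few entries below, once pid=66 finishes, the test exports snaptb0-testExportFileSystemState to a per-run HDFS directory (the snapshot.ExportSnapshot(1085/1086) inputRoot/outputRoot entries). Outside the test harness the same copy is normally driven through the ExportSnapshot tool; a rough Java sketch via ToolRunner is shown here, where the destination URI and mapper count are placeholders, not values from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Command-line equivalent (destination and mapper count are placeholders):
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemState \
    //     -copy-to hdfs://namenode:8020/export-test/export-dest -mappers 2
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/export-test/export-dest",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}

The tool first copies the snapshot manifest into .hbase-snapshot/.tmp/ on the target (the "Copy Snapshot Manifest" entry below) and then launches a MapReduce job to copy the referenced hfiles, which is why the subsequent entries stage the HBase and Hadoop jars via TableMapReduceUtil.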
2024-11-21T22:02:57,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-21T22:02:57,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-21T22:02:57,184 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:57,184 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e 2024-11-21T22:02:57,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=66 2024-11-21T22:02:57,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d1d8d00636267055df78265b721672e in 252 msec 2024-11-21T22:02:57,198 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:02:57,199 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:02:57,201 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:02:57,201 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-21T22:02:57,202 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T22:02:57,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741965_1141 (size=630) 2024-11-21T22:02:57,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741965_1141 (size=630) 2024-11-21T22:02:57,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741965_1141 (size=630) 2024-11-21T22:02:57,218 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:02:57,225 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:02:57,226 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T22:02:57,228 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:02:57,228 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-21T22:02:57,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 317 msec 2024-11-21T22:02:57,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T22:02:57,232 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T22:02:57,232 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232 2024-11-21T22:02:57,232 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:57,263 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:02:57,263 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T22:02:57,265 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's 
expiration status and integrity. 2024-11-21T22:02:57,271 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T22:02:57,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741966_1142 (size=165) 2024-11-21T22:02:57,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741966_1142 (size=165) 2024-11-21T22:02:57,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741966_1142 (size=165) 2024-11-21T22:02:57,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741967_1143 (size=630) 2024-11-21T22:02:57,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741967_1143 (size=630) 2024-11-21T22:02:57,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741967_1143 (size=630) 2024-11-21T22:02:57,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:57,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:57,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,026 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 807d59511a09f362346016a4b356021c changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:02:58,027 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0509275dd453075d266faf565c5c5c3f changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:02:58,027 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7d1d8d00636267055df78265b721672e changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:02:58,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-6083477743101178284.jar 2024-11-21T22:02:58,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-6347522238698803122.jar 2024-11-21T22:02:58,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:02:58,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:02:58,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:02:58,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:02:58,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:02:58,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:02:58,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:02:58,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:02:58,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:02:58,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:02:58,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:02:58,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:02:58,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:58,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:58,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:58,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:58,455 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:02:58,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:58,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:02:58,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741968_1144 (size=131440) 2024-11-21T22:02:58,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741968_1144 (size=131440) 2024-11-21T22:02:58,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741968_1144 (size=131440) 2024-11-21T22:02:58,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T22:02:58,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T22:02:58,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T22:02:58,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741970_1146 (size=1323991) 2024-11-21T22:02:58,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741970_1146 (size=1323991) 2024-11-21T22:02:58,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741970_1146 (size=1323991) 2024-11-21T22:02:58,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741971_1147 (size=903739) 2024-11-21T22:02:58,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741971_1147 (size=903739) 2024-11-21T22:02:58,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741971_1147 (size=903739) 2024-11-21T22:02:58,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741972_1148 (size=8360083) 2024-11-21T22:02:58,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741972_1148 (size=8360083) 2024-11-21T22:02:58,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to 
blk_1073741972_1148 (size=8360083) 2024-11-21T22:02:58,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741973_1149 (size=1877034) 2024-11-21T22:02:58,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741973_1149 (size=1877034) 2024-11-21T22:02:58,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741973_1149 (size=1877034) 2024-11-21T22:02:58,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741974_1150 (size=77835) 2024-11-21T22:02:58,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741974_1150 (size=77835) 2024-11-21T22:02:58,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741974_1150 (size=77835) 2024-11-21T22:02:58,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741975_1151 (size=30949) 2024-11-21T22:02:58,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741975_1151 (size=30949) 2024-11-21T22:02:58,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741975_1151 (size=30949) 2024-11-21T22:02:58,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741976_1152 (size=1597347) 2024-11-21T22:02:58,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741976_1152 (size=1597347) 2024-11-21T22:02:58,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741976_1152 (size=1597347) 2024-11-21T22:02:58,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741977_1153 (size=4695811) 2024-11-21T22:02:58,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741977_1153 (size=4695811) 2024-11-21T22:02:58,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741977_1153 (size=4695811) 2024-11-21T22:02:58,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741978_1154 (size=232957) 2024-11-21T22:02:58,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741978_1154 (size=232957) 2024-11-21T22:02:58,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741978_1154 (size=232957) 2024-11-21T22:02:58,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741979_1155 (size=127628) 2024-11-21T22:02:58,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is 
added to blk_1073741979_1155 (size=127628) 2024-11-21T22:02:58,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741979_1155 (size=127628) 2024-11-21T22:02:59,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741980_1156 (size=20406) 2024-11-21T22:02:59,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741980_1156 (size=20406) 2024-11-21T22:02:59,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741980_1156 (size=20406) 2024-11-21T22:02:59,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741981_1157 (size=5175431) 2024-11-21T22:02:59,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741981_1157 (size=5175431) 2024-11-21T22:02:59,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741981_1157 (size=5175431) 2024-11-21T22:02:59,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741982_1158 (size=217634) 2024-11-21T22:02:59,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741982_1158 (size=217634) 2024-11-21T22:02:59,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741982_1158 (size=217634) 2024-11-21T22:02:59,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741983_1159 (size=440656) 2024-11-21T22:02:59,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741983_1159 (size=440656) 2024-11-21T22:02:59,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741983_1159 (size=440656) 2024-11-21T22:02:59,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741984_1160 (size=1832290) 2024-11-21T22:02:59,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741984_1160 (size=1832290) 2024-11-21T22:02:59,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741984_1160 (size=1832290) 2024-11-21T22:02:59,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741985_1161 (size=322274) 2024-11-21T22:02:59,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741985_1161 (size=322274) 2024-11-21T22:02:59,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741985_1161 (size=322274) 2024-11-21T22:02:59,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35185 is added to blk_1073741986_1162 (size=503880) 2024-11-21T22:02:59,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741986_1162 (size=503880) 2024-11-21T22:02:59,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741986_1162 (size=503880) 2024-11-21T22:02:59,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741987_1163 (size=29229) 2024-11-21T22:02:59,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741987_1163 (size=29229) 2024-11-21T22:02:59,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741987_1163 (size=29229) 2024-11-21T22:02:59,300 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0002_000001 (auth:SIMPLE) from 127.0.0.1:60904 2024-11-21T22:02:59,311 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000001/launch_container.sh] 2024-11-21T22:02:59,311 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000001/container_tokens] 2024-11-21T22:02:59,311 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0002/container_1732226522247_0002_01_000001/sysfs] 2024-11-21T22:02:59,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741988_1164 (size=24096) 2024-11-21T22:02:59,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741988_1164 (size=24096) 2024-11-21T22:02:59,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741988_1164 (size=24096) 2024-11-21T22:02:59,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741989_1165 (size=111872) 2024-11-21T22:02:59,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741989_1165 (size=111872) 2024-11-21T22:02:59,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741989_1165 (size=111872) 2024-11-21T22:02:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741990_1166 (size=6424742) 2024-11-21T22:02:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741990_1166 (size=6424742) 2024-11-21T22:02:59,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741990_1166 (size=6424742) 2024-11-21T22:02:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741991_1167 (size=45609) 2024-11-21T22:02:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741991_1167 (size=45609) 2024-11-21T22:02:59,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741991_1167 (size=45609) 2024-11-21T22:02:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741992_1168 (size=136454) 2024-11-21T22:02:59,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741992_1168 (size=136454) 2024-11-21T22:02:59,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741992_1168 (size=136454) 2024-11-21T22:02:59,697 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:02:59,699 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-21T22:02:59,701 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:02:59,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741993_1169 (size=344) 2024-11-21T22:02:59,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741993_1169 (size=344) 2024-11-21T22:02:59,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741993_1169 (size=344) 2024-11-21T22:02:59,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741994_1170 (size=15) 2024-11-21T22:02:59,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741994_1170 (size=15) 2024-11-21T22:02:59,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741994_1170 (size=15) 2024-11-21T22:02:59,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741995_1171 (size=303738) 2024-11-21T22:02:59,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741995_1171 (size=303738) 2024-11-21T22:02:59,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741995_1171 (size=303738) 2024-11-21T22:02:59,852 WARN 
[SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:02:59,852 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:03:00,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0003_000001 (auth:SIMPLE) from 127.0.0.1:60910 2024-11-21T22:03:00,579 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:03:04,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-21T22:03:04,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-21T22:03:04,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-21T22:03:04,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-21T22:03:06,160 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0003_000001 (auth:SIMPLE) from 127.0.0.1:39170 2024-11-21T22:03:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741996_1172 (size=349388) 2024-11-21T22:03:06,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741996_1172 (size=349388) 2024-11-21T22:03:06,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741996_1172 (size=349388) 2024-11-21T22:03:08,387 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0003_000001 (auth:SIMPLE) from 127.0.0.1:49250 2024-11-21T22:03:09,927 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:03:11,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741997_1173 (size=8190) 2024-11-21T22:03:11,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741997_1173 (size=8190) 2024-11-21T22:03:11,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741997_1173 (size=8190) 2024-11-21T22:03:11,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is 
added to blk_1073741998_1174 (size=5422) 2024-11-21T22:03:11,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741998_1174 (size=5422) 2024-11-21T22:03:11,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741998_1174 (size=5422) 2024-11-21T22:03:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741999_1175 (size=17422) 2024-11-21T22:03:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741999_1175 (size=17422) 2024-11-21T22:03:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741999_1175 (size=17422) 2024-11-21T22:03:11,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742000_1176 (size=465) 2024-11-21T22:03:11,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742000_1176 (size=465) 2024-11-21T22:03:11,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742000_1176 (size=465) 2024-11-21T22:03:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742001_1177 (size=17422) 2024-11-21T22:03:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742001_1177 (size=17422) 2024-11-21T22:03:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742001_1177 (size=17422) 2024-11-21T22:03:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742002_1178 (size=349388) 2024-11-21T22:03:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742002_1178 (size=349388) 2024-11-21T22:03:11,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742002_1178 (size=349388) 2024-11-21T22:03:11,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000002/launch_container.sh] 2024-11-21T22:03:11,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000002/container_tokens] 2024-11-21T22:03:11,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000002/sysfs] 2024-11-21T22:03:11,242 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0003_000001 (auth:SIMPLE) from 127.0.0.1:49254 2024-11-21T22:03:12,994 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:03:12,997 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:03:13,006 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-21T22:03:13,006 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:03:13,007 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:03:13,007 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T22:03:13,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-21T22:03:13,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-21T22:03:13,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T22:03:13,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-21T22:03:13,008 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226577232/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-21T22:03:13,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-21T22:03:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T22:03:13,025 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226593025"}]},"ts":"1732226593025"} 2024-11-21T22:03:13,028 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-21T22:03:13,028 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-21T22:03:13,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-21T22:03:13,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, UNASSIGN}] 2024-11-21T22:03:13,035 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, UNASSIGN 2024-11-21T22:03:13,035 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, UNASSIGN 2024-11-21T22:03:13,037 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=0509275dd453075d266faf565c5c5c3f, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:03:13,038 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=7d1d8d00636267055df78265b721672e, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:13,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, UNASSIGN because future has completed 2024-11-21T22:03:13,040 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:13,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:03:13,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, 
region=7d1d8d00636267055df78265b721672e, UNASSIGN because future has completed 2024-11-21T22:03:13,042 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:13,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T22:03:13,195 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:03:13,195 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:03:13,195 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 0509275dd453075d266faf565c5c5c3f, disabling compactions & flushes 2024-11-21T22:03:13,195 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:03:13,195 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:03:13,195 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. after waiting 0 ms 2024-11-21T22:03:13,195 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 2024-11-21T22:03:13,197 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 7d1d8d00636267055df78265b721672e 2024-11-21T22:03:13,197 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 7d1d8d00636267055df78265b721672e, disabling compactions & flushes 2024-11-21T22:03:13,198 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 
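
Note on the "For class X, using jar Y" entries logged by mapreduce.TableMapReduceUtil at 22:02:58 above: that is the dependency-jar resolution step, where the jar backing each required HBase, Hadoop, and third-party class is added to the job's distributed-cache jar list before submission. A minimal, hypothetical sketch of the kind of call that produces those entries (only the TableMapReduceUtil class is taken from the log; the specific entry point, job name, and printout are assumptions, not this test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical job; the test above builds its own export job internally.
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Resolves the jar providing each needed class and records it in the job's
        // "tmpjars" list -- that resolution is what the DEBUG lines above show.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }
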
2024-11-21T22:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. after waiting 0 ms 2024-11-21T22:03:13,198 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:03:13,203 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:03:13,203 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:03:13,203 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:13,204 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:13,204 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e. 2024-11-21T22:03:13,204 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f. 
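
The "Finalize the Snapshot Export" / "Export Completed: snaptb0-testExportFileSystemState" entries at 22:03:12-13 above are the tail end of an ExportSnapshot MapReduce job. A minimal sketch of how such an export is typically driven, assuming ExportSnapshot's standard Tool entry point (the snapshot name comes from the log; the destination URI and mapper count are illustrative, and the option spelling follows the HBase reference guide rather than this test's internal helpers):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot's metadata and referenced HFiles to another filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://namenode:8020/hbase-export", // hypothetical destination
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }
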
2024-11-21T22:03:13,204 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 0509275dd453075d266faf565c5c5c3f: Waiting for close lock at 1732226593195Running coprocessor pre-close hooks at 1732226593195Disabling compacts and flushes for region at 1732226593195Disabling writes for close at 1732226593195Writing region close event to WAL at 1732226593199 (+4 ms)Running coprocessor post-close hooks at 1732226593203 (+4 ms)Closed at 1732226593204 (+1 ms) 2024-11-21T22:03:13,204 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 7d1d8d00636267055df78265b721672e: Waiting for close lock at 1732226593198Running coprocessor pre-close hooks at 1732226593198Disabling compacts and flushes for region at 1732226593198Disabling writes for close at 1732226593198Writing region close event to WAL at 1732226593199 (+1 ms)Running coprocessor post-close hooks at 1732226593203 (+4 ms)Closed at 1732226593204 (+1 ms) 2024-11-21T22:03:13,206 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 0509275dd453075d266faf565c5c5c3f 2024-11-21T22:03:13,207 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=0509275dd453075d266faf565c5c5c3f, regionState=CLOSED 2024-11-21T22:03:13,207 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 7d1d8d00636267055df78265b721672e 2024-11-21T22:03:13,207 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=7d1d8d00636267055df78265b721672e, regionState=CLOSED 2024-11-21T22:03:13,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:03:13,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:13,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=71 2024-11-21T22:03:13,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=72 2024-11-21T22:03:13,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure 7d1d8d00636267055df78265b721672e, server=2a884b411602,44629,1732226515126 in 171 msec 2024-11-21T22:03:13,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 0509275dd453075d266faf565c5c5c3f, server=2a884b411602,41399,1732226514979 in 173 msec 2024-11-21T22:03:13,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7d1d8d00636267055df78265b721672e, UNASSIGN in 183 msec 2024-11-21T22:03:13,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=72, resume processing ppid=70 2024-11-21T22:03:13,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0509275dd453075d266faf565c5c5c3f, UNASSIGN in 183 msec 2024-11-21T22:03:13,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-21T22:03:13,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 190 msec 2024-11-21T22:03:13,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226593224"}]},"ts":"1732226593224"} 2024-11-21T22:03:13,226 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-21T22:03:13,226 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-21T22:03:13,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 209 msec 2024-11-21T22:03:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T22:03:13,341 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T22:03:13,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-21T22:03:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,343 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-21T22:03:13,344 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-21T22:03:13,349 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e 2024-11-21T22:03:13,349 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f 2024-11-21T22:03:13,351 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/recovered.edits] 2024-11-21T22:03:13,351 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/recovered.edits] 2024-11-21T22:03:13,355 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/cf/f33cf7deea9a431182cf45563c5ff59b 2024-11-21T22:03:13,355 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/cf/2fcdb666e4e34015a31bc0f28f76bc8d 2024-11-21T22:03:13,358 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e/recovered.edits/9.seqid 2024-11-21T22:03:13,358 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f/recovered.edits/9.seqid 2024-11-21T22:03:13,358 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/7d1d8d00636267055df78265b721672e 2024-11-21T22:03:13,358 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemState/0509275dd453075d266faf565c5c5c3f 2024-11-21T22:03:13,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-21T22:03:13,361 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,365 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-21T22:03:13,367 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-21T22:03:13,369 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,369 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-21T22:03:13,369 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226593369"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:13,369 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226593369"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:13,372 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:03:13,372 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7d1d8d00636267055df78265b721672e, NAME => 'testtb-testExportFileSystemState,,1732226575848.7d1d8d00636267055df78265b721672e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0509275dd453075d266faf565c5c5c3f, NAME => 'testtb-testExportFileSystemState,1,1732226575848.0509275dd453075d266faf565c5c5c3f.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:03:13,372 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
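
The DisableTableProcedure (pid=69) and DeleteTableProcedure (pid=75) above, together with the "delete name: ..." snapshot requests a few entries below, are the server-side record of the test's cleanup. A hypothetical client-side equivalent of that sequence using the public Admin API (table and snapshot names are taken from the log; the connection setup is illustrative and not the test harness's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemState");
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // corresponds to DisableTableProcedure above
            }
            admin.deleteTable(table);      // corresponds to DeleteTableProcedure above
          }
          // Corresponds to the two "delete name: ..." snapshot requests in the log.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }
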
2024-11-21T22:03:13,372 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226593372"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:13,374 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-21T22:03:13,375 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T22:03:13,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 33 msec 2024-11-21T22:03:13,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,377 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T22:03:13,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T22:03:13,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T22:03:13,377 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,385 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,385 DEBUG [pool-69-thread-1-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-21T22:03:13,388 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-21T22:03:13,388 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T22:03:13,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-21T22:03:13,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-21T22:03:13,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-21T22:03:13,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-21T22:03:13,429 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=785 (was 782) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:43399 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43399 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-287660239_1 at /127.0.0.1:46996 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:41518 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 118257) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2671 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) 
java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:39586 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_-287660239_1 at /127.0.0.1:42478 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:51568 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=573 (was 558) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 19), AvailableMemoryMB=545 (was 702) 2024-11-21T22:03:13,429 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=785 is superior to 500 2024-11-21T22:03:13,454 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=785, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=573, ProcessCount=19, AvailableMemoryMB=525 2024-11-21T22:03:13,454 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=785 is superior to 500 2024-11-21T22:03:13,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:03:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:13,459 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:03:13,460 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:13,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" 
qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-21T22:03:13,461 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:03:13,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T22:03:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742003_1179 (size=404) 2024-11-21T22:03:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742003_1179 (size=404) 2024-11-21T22:03:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742003_1179 (size=404) 2024-11-21T22:03:13,491 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ce84aab708388952511fda968a3634d3, NAME => 'testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:13,491 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 81d0565906c05e3981f19964a793f6eb, NAME => 'testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:13,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742005_1181 (size=65) 2024-11-21T22:03:13,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742004_1180 (size=65) 2024-11-21T22:03:13,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742004_1180 (size=65) 2024-11-21T22:03:13,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742005_1181 (size=65) 2024-11-21T22:03:13,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added 
to blk_1073742005_1181 (size=65) 2024-11-21T22:03:13,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742004_1180 (size=65) 2024-11-21T22:03:13,528 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:13,528 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:13,528 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing ce84aab708388952511fda968a3634d3, disabling compactions & flushes 2024-11-21T22:03:13,528 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 81d0565906c05e3981f19964a793f6eb, disabling compactions & flushes 2024-11-21T22:03:13,529 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,529 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. after waiting 0 ms 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. after waiting 0 ms 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:13,529 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,529 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 
2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 81d0565906c05e3981f19964a793f6eb: Waiting for close lock at 1732226593528Disabling compacts and flushes for region at 1732226593528Disabling writes for close at 1732226593529 (+1 ms)Writing region close event to WAL at 1732226593529Closed at 1732226593529 2024-11-21T22:03:13,529 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for ce84aab708388952511fda968a3634d3: Waiting for close lock at 1732226593528Disabling compacts and flushes for region at 1732226593528Disabling writes for close at 1732226593529 (+1 ms)Writing region close event to WAL at 1732226593529Closed at 1732226593529 2024-11-21T22:03:13,530 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:03:13,530 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226593530"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226593530"}]},"ts":"1732226593530"} 2024-11-21T22:03:13,531 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226593530"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226593530"}]},"ts":"1732226593530"} 2024-11-21T22:03:13,534 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
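[editor's note] The two HRegion creations above (ce84aab708388952511fda968a3634d3 and 81d0565906c05e3981f19964a793f6eb, split at row '1') and the hbase:meta Puts that follow are the server side of a pre-split table create. As a minimal client-side sketch only, not part of this test's code, the region boundaries recorded in meta could be listed afterwards through the public Admin API; the connection setup is assumed to point at the same cluster and the printed format is purely illustrative.

  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ListTableRegions {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // After CreateTableProcedure pid=76 completes, two regions should be reported:
        // one with start key '' and end key '1', and one with start key '1' and end key ''.
        List<RegionInfo> regions = admin.getRegions(TableName.valueOf("testtb-testConsecutiveExports"));
        for (RegionInfo region : regions) {
          System.out.println(region.getEncodedName()
              + " [" + Bytes.toStringBinary(region.getStartKey())
              + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
        }
      }
    }
  }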
2024-11-21T22:03:13,535 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:03:13,535 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226593535"}]},"ts":"1732226593535"} 2024-11-21T22:03:13,541 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-21T22:03:13,542 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:03:13,544 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:03:13,544 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:03:13,544 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:03:13,544 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:03:13,545 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:03:13,545 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:03:13,545 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:03:13,545 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:03:13,545 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:03:13,545 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:03:13,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, ASSIGN}] 2024-11-21T22:03:13,547 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, ASSIGN 2024-11-21T22:03:13,548 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, ASSIGN 2024-11-21T22:03:13,549 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:03:13,550 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:03:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T22:03:13,700 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T22:03:13,700 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=81d0565906c05e3981f19964a793f6eb, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:13,700 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=ce84aab708388952511fda968a3634d3, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:13,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, ASSIGN because future has completed 2024-11-21T22:03:13,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:03:13,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, ASSIGN because future has completed 2024-11-21T22:03:13,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T22:03:13,859 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,859 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 81d0565906c05e3981f19964a793f6eb, NAME => 'testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:03:13,860 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. service=AccessControlService 2024-11-21T22:03:13,860 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:03:13,861 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,861 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:13,861 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,861 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,862 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:13,862 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => ce84aab708388952511fda968a3634d3, NAME => 'testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:03:13,862 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. service=AccessControlService 2024-11-21T22:03:13,862 INFO [StoreOpener-81d0565906c05e3981f19964a793f6eb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,863 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:03:13,863 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,863 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:13,863 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,863 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,864 INFO [StoreOpener-ce84aab708388952511fda968a3634d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,864 INFO [StoreOpener-81d0565906c05e3981f19964a793f6eb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 81d0565906c05e3981f19964a793f6eb columnFamilyName cf 2024-11-21T22:03:13,864 DEBUG [StoreOpener-81d0565906c05e3981f19964a793f6eb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:13,865 INFO [StoreOpener-81d0565906c05e3981f19964a793f6eb-1 {}] regionserver.HStore(327): Store=81d0565906c05e3981f19964a793f6eb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:13,865 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,866 INFO [StoreOpener-ce84aab708388952511fda968a3634d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce84aab708388952511fda968a3634d3 columnFamilyName cf 2024-11-21T22:03:13,866 DEBUG [StoreOpener-ce84aab708388952511fda968a3634d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:13,866 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,866 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,866 INFO [StoreOpener-ce84aab708388952511fda968a3634d3-1 {}] regionserver.HStore(327): Store=ce84aab708388952511fda968a3634d3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:13,866 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,867 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,868 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,869 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,870 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, 
pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:13,871 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 81d0565906c05e3981f19964a793f6eb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59948756, jitterRate=-0.10669392347335815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:13,871 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:13,872 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 81d0565906c05e3981f19964a793f6eb: Running coprocessor pre-open hook at 1732226593861Writing region info on filesystem at 1732226593861Initializing all the Stores at 1732226593862 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226593862Cleaning up temporary data from old regions at 1732226593867 (+5 ms)Running coprocessor post-open hooks at 1732226593871 (+4 ms)Region opened successfully at 1732226593871 2024-11-21T22:03:13,872 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb., pid=79, masterSystemTime=1732226593855 2024-11-21T22:03:13,873 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:13,873 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened ce84aab708388952511fda968a3634d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71409162, jitterRate=0.06407943367958069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:13,873 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ce84aab708388952511fda968a3634d3 2024-11-21T22:03:13,874 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for ce84aab708388952511fda968a3634d3: Running coprocessor pre-open hook at 1732226593863Writing region info on filesystem at 1732226593863Initializing all the Stores at 1732226593864 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226593864Cleaning up temporary data from old regions at 1732226593868 (+4 ms)Running coprocessor post-open hooks at 1732226593873 (+5 ms)Region opened successfully at 1732226593873 2024-11-21T22:03:13,874 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3., pid=80, masterSystemTime=1732226593858 2024-11-21T22:03:13,874 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,874 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:13,875 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=81d0565906c05e3981f19964a793f6eb, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:13,877 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:13,877 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 
2024-11-21T22:03:13,877 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=ce84aab708388952511fda968a3634d3, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:13,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:03:13,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:13,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=78 2024-11-21T22:03:13,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183 in 175 msec 2024-11-21T22:03:13,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=77 2024-11-21T22:03:13,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, ASSIGN in 336 msec 2024-11-21T22:03:13,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126 in 176 msec 2024-11-21T22:03:13,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-21T22:03:13,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, ASSIGN in 337 msec 2024-11-21T22:03:13,884 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:03:13,884 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226593884"}]},"ts":"1732226593884"} 2024-11-21T22:03:13,886 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-21T22:03:13,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:03:13,888 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-21T22:03:13,891 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T22:03:13,940 
DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:13,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:13,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:13,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:13,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:13,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 501 msec 2024-11-21T22:03:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T22:03:14,091 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T22:03:14,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-21T22:03:14,092 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:14,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-21T22:03:14,097 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:14,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
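[editor's note] CreateTableProcedure pid=76 above walks through CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE and POST_OPERATION before the client logs "Operation: CREATE ... completed". The following is a hedged sketch of the kind of client call that issues such a request, not the test's own code; the single family 'cf' with one version and the pre-split at row '1' are taken from the descriptor logged by HMaster, while the 100 ms polling interval is an arbitrary choice for the sketch.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateExportTestTable {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName table = TableName.valueOf("testtb-testConsecutiveExports");
        // Single column family 'cf' with one version, matching the descriptor logged above.
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        // Pre-split at row '1' so two regions are created, as in the RegionOpenAndInit entries.
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        // Wait until all regions are assigned and open, mirroring the HBaseTestingUtil wait above.
        while (!admin.isTableAvailable(table)) {
          Thread.sleep(100);
        }
      }
    }
  }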
2024-11-21T22:03:14,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T22:03:14,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T22:03:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226594102 (current time:1732226594102). 2024-11-21T22:03:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:03:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-21T22:03:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:03:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b2bf534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:14,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:14,105 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:14,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:14,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:14,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@762ad8a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:14,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:14,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,107 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:39718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:14,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6db8e4a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:14,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:14,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:14,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,113 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:03:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d6860b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:14,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:14,115 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:14,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:14,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:14,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ba099f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:14,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:14,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,117 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:14,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a3a8b23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:14,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:14,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:03:14,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,124 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:14,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:14,125 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,126 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T22:03:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:03:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T22:03:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-21T22:03:14,128 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:03:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-21T22:03:14,129 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:03:14,131 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:03:14,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742006_1182 (size=161) 2024-11-21T22:03:14,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742006_1182 (size=161) 2024-11-21T22:03:14,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742006_1182 (size=161) 2024-11-21T22:03:14,141 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:03:14,141 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb}] 2024-11-21T22:03:14,142 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,142 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-21T22:03:14,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-21T22:03:14,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-21T22:03:14,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for ce84aab708388952511fda968a3634d3: 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. for emptySnaptb0-testConsecutiveExports completed. 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 81d0565906c05e3981f19964a793f6eb: 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. for emptySnaptb0-testConsecutiveExports completed. 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:14,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:03:14,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742008_1184 (size=68) 2024-11-21T22:03:14,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742008_1184 (size=68) 2024-11-21T22:03:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742007_1183 (size=68) 2024-11-21T22:03:14,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742007_1183 (size=68) 2024-11-21T22:03:14,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:14,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-21T22:03:14,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-21T22:03:14,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 
2024-11-21T22:03:14,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-21T22:03:14,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742007_1183 (size=68) 2024-11-21T22:03:14,312 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742008_1184 (size=68) 2024-11-21T22:03:14,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-21T22:03:14,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,312 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb in 172 msec 2024-11-21T22:03:14,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=82, resume processing ppid=81 2024-11-21T22:03:14,318 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:03:14,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 in 172 msec 2024-11-21T22:03:14,320 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:03:14,321 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:03:14,321 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:14,322 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:14,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742009_1185 (size=543) 2024-11-21T22:03:14,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742009_1185 (size=543) 2024-11-21T22:03:14,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742009_1185 (size=543) 2024-11-21T22:03:14,344 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:03:14,349 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:03:14,350 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:14,351 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:03:14,351 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-21T22:03:14,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 225 msec 2024-11-21T22:03:14,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-21T22:03:14,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-21T22:03:14,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-21T22:03:14,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 
2024-11-21T22:03:14,443 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T22:03:14,451 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='08d8b3e6fc87837a512f743235bbe79f1', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:03:14,452 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1cf59bb5565d0e185d50de7bfe2316cf4', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='20bf92af49bbec01df3cb4b902a3a3ae0', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,454 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='34ec78390bfc0dfb0ed223ec9b30b0992', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,455 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='447814f3b50b9bcc0e1f9d76e634adeb7', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:03:14,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:03:14,462 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T22:03:14,465 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-21T22:03:14,465 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 
2024-11-21T22:03:14,465 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:14,467 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T22:03:14,473 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T22:03:14,480 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T22:03:14,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T22:03:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226594484 (current time:1732226594484). 2024-11-21T22:03:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:03:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-21T22:03:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:03:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@377a236c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:14,486 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:14,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:14,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:14,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b08f25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-21T22:03:14,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:14,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:14,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,488 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:14,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@abdf67f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:14,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:14,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52882, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:14,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:03:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,493 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f61424b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:14,495 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:14,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:14,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:14,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b56e0a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:14,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:14,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,497 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:14,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ef7678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:14,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:14,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:14,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,500 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:14,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:14,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:14,504 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:14,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:03:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:14,505 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T22:03:14,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-21T22:03:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T22:03:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-21T22:03:14,508 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:03:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T22:03:14,509 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:03:14,511 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:03:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742010_1186 (size=156) 2024-11-21T22:03:14,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742010_1186 (size=156) 2024-11-21T22:03:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742010_1186 (size=156) 2024-11-21T22:03:14,518 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:03:14,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb}] 2024-11-21T22:03:14,519 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,519 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T22:03:14,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-21T22:03:14,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-21T22:03:14,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:14,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:14,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing ce84aab708388952511fda968a3634d3 1/1 column families, dataSize=735 B heapSize=1.80 KB 2024-11-21T22:03:14,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 81d0565906c05e3981f19964a793f6eb 1/1 column families, dataSize=2.54 KB heapSize=5.73 KB 2024-11-21T22:03:14,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/.tmp/cf/7d2b973708134a3d80a514ee67d812d3 is 71, key is 00c57200fb631ed5c83f2c9ebb953c07/cf:q/1732226594459/Put/seqid=0 2024-11-21T22:03:14,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/.tmp/cf/141a2ee0402942c0a24a3146168017a6 is 71, key is 13e95e127fc0ca4665af22224c958118/cf:q/1732226594461/Put/seqid=0 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742011_1187 (size=5838) 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742012_1188 (size=7780) 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742011_1187 (size=5838) 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742012_1188 (size=7780) 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742011_1187 (size=5838) 2024-11-21T22:03:14,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742012_1188 
(size=7780) 2024-11-21T22:03:14,701 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=735 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/.tmp/cf/7d2b973708134a3d80a514ee67d812d3 2024-11-21T22:03:14,701 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.54 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/.tmp/cf/141a2ee0402942c0a24a3146168017a6 2024-11-21T22:03:14,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/.tmp/cf/141a2ee0402942c0a24a3146168017a6 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6 2024-11-21T22:03:14,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/.tmp/cf/7d2b973708134a3d80a514ee67d812d3 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3 2024-11-21T22:03:14,713 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6, entries=39, sequenceid=6, filesize=7.6 K 2024-11-21T22:03:14,713 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3, entries=11, sequenceid=6, filesize=5.7 K 2024-11-21T22:03:14,714 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~735 B/735, heapSize ~1.78 KB/1824, currentSize=0 B/0 for ce84aab708388952511fda968a3634d3 in 43ms, sequenceid=6, compaction requested=false 2024-11-21T22:03:14,714 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2601, heapSize ~5.72 KB/5856, currentSize=0 B/0 for 81d0565906c05e3981f19964a793f6eb in 43ms, sequenceid=6, compaction requested=false 2024-11-21T22:03:14,714 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-21T22:03:14,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 81d0565906c05e3981f19964a793f6eb: 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. for snaptb0-testConsecutiveExports completed. 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for ce84aab708388952511fda968a3634d3: 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. for snaptb0-testConsecutiveExports completed. 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6] hfiles 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6 for snapshot=snaptb0-testConsecutiveExports 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3] hfiles 2024-11-21T22:03:14,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3 for snapshot=snaptb0-testConsecutiveExports 2024-11-21T22:03:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742014_1190 (size=107) 2024-11-21T22:03:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742014_1190 (size=107) 2024-11-21T22:03:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742013_1189 (size=107) 2024-11-21T22:03:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742013_1189 (size=107) 2024-11-21T22:03:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742014_1190 (size=107) 2024-11-21T22:03:14,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 
2024-11-21T22:03:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742013_1189 (size=107) 2024-11-21T22:03:14,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-21T22:03:14,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:14,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-21T22:03:14,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-21T22:03:14,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-21T22:03:14,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,727 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:14,727 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 2024-11-21T22:03:14,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 81d0565906c05e3981f19964a793f6eb in 210 msec 2024-11-21T22:03:14,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-21T22:03:14,730 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:03:14,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce84aab708388952511fda968a3634d3 in 210 msec 2024-11-21T22:03:14,731 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:03:14,732 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports 
table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:03:14,732 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-21T22:03:14,733 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T22:03:14,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742015_1191 (size=621) 2024-11-21T22:03:14,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742015_1191 (size=621) 2024-11-21T22:03:14,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742015_1191 (size=621) 2024-11-21T22:03:14,744 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:03:14,749 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:03:14,750 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T22:03:14,751 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:03:14,751 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-21T22:03:14,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 245 msec 2024-11-21T22:03:14,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T22:03:14,822 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T22:03:14,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822 2024-11-21T22:03:14,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:14,853 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:14,853 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d27884a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T22:03:14,855 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:03:14,858 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T22:03:14,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:14,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:14,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:15,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-16322261424733325440.jar 2024-11-21T22:03:15,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:15,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-15960859067159111999.jar 2024-11-21T22:03:16,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:16,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:03:16,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:03:16,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:03:16,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:03:16,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:03:16,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:03:16,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:03:16,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:03:16,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:03:16,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:03:16,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:03:16,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:16,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:16,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:16,221 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:16,221 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:16,221 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:16,222 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:16,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742016_1192 (size=131440) 2024-11-21T22:03:16,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742016_1192 (size=131440) 2024-11-21T22:03:16,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742016_1192 (size=131440) 2024-11-21T22:03:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T22:03:16,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T22:03:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T22:03:16,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T22:03:16,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T22:03:16,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T22:03:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742019_1195 (size=903739) 2024-11-21T22:03:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742019_1195 (size=903739) 2024-11-21T22:03:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742019_1195 (size=903739) 2024-11-21T22:03:16,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742020_1196 (size=8360083) 2024-11-21T22:03:16,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742020_1196 (size=8360083) 2024-11-21T22:03:16,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to 
blk_1073742020_1196 (size=8360083) 2024-11-21T22:03:16,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T22:03:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T22:03:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T22:03:16,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742022_1198 (size=77835) 2024-11-21T22:03:16,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742022_1198 (size=77835) 2024-11-21T22:03:16,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742022_1198 (size=77835) 2024-11-21T22:03:16,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742023_1199 (size=30949) 2024-11-21T22:03:16,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742023_1199 (size=30949) 2024-11-21T22:03:16,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742023_1199 (size=30949) 2024-11-21T22:03:16,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742024_1200 (size=1597347) 2024-11-21T22:03:16,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742024_1200 (size=1597347) 2024-11-21T22:03:16,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742024_1200 (size=1597347) 2024-11-21T22:03:16,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742025_1201 (size=440656) 2024-11-21T22:03:16,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742025_1201 (size=440656) 2024-11-21T22:03:16,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742025_1201 (size=440656) 2024-11-21T22:03:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742026_1202 (size=4695811) 2024-11-21T22:03:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742026_1202 (size=4695811) 2024-11-21T22:03:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742026_1202 (size=4695811) 2024-11-21T22:03:16,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742027_1203 (size=232957) 2024-11-21T22:03:16,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is 
added to blk_1073742027_1203 (size=232957) 2024-11-21T22:03:16,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742027_1203 (size=232957) 2024-11-21T22:03:16,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742028_1204 (size=127628) 2024-11-21T22:03:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742028_1204 (size=127628) 2024-11-21T22:03:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742028_1204 (size=127628) 2024-11-21T22:03:16,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742029_1205 (size=20406) 2024-11-21T22:03:16,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742029_1205 (size=20406) 2024-11-21T22:03:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742029_1205 (size=20406) 2024-11-21T22:03:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T22:03:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T22:03:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T22:03:16,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742031_1207 (size=217634) 2024-11-21T22:03:16,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742031_1207 (size=217634) 2024-11-21T22:03:16,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742031_1207 (size=217634) 2024-11-21T22:03:16,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T22:03:16,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T22:03:16,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T22:03:16,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742033_1209 (size=322274) 2024-11-21T22:03:16,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742033_1209 (size=322274) 2024-11-21T22:03:16,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742033_1209 (size=322274) 2024-11-21T22:03:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36581 is added to blk_1073742034_1210 (size=503880) 2024-11-21T22:03:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742034_1210 (size=503880) 2024-11-21T22:03:16,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742034_1210 (size=503880) 2024-11-21T22:03:16,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742035_1211 (size=29229) 2024-11-21T22:03:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742035_1211 (size=29229) 2024-11-21T22:03:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742035_1211 (size=29229) 2024-11-21T22:03:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742036_1212 (size=24096) 2024-11-21T22:03:16,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742036_1212 (size=24096) 2024-11-21T22:03:16,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742036_1212 (size=24096) 2024-11-21T22:03:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742037_1213 (size=111872) 2024-11-21T22:03:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742037_1213 (size=111872) 2024-11-21T22:03:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742037_1213 (size=111872) 2024-11-21T22:03:16,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742038_1214 (size=6424742) 2024-11-21T22:03:16,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742038_1214 (size=6424742) 2024-11-21T22:03:16,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742038_1214 (size=6424742) 2024-11-21T22:03:16,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742039_1215 (size=45609) 2024-11-21T22:03:16,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742039_1215 (size=45609) 2024-11-21T22:03:16,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742039_1215 (size=45609) 2024-11-21T22:03:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742040_1216 (size=136454) 2024-11-21T22:03:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742040_1216 (size=136454) 2024-11-21T22:03:16,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36723 is added to blk_1073742040_1216 (size=136454) 2024-11-21T22:03:16,890 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:03:16,893 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-21T22:03:16,896 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:03:16,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742041_1217 (size=338) 2024-11-21T22:03:16,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742041_1217 (size=338) 2024-11-21T22:03:16,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742041_1217 (size=338) 2024-11-21T22:03:16,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742042_1218 (size=15) 2024-11-21T22:03:16,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742042_1218 (size=15) 2024-11-21T22:03:16,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742042_1218 (size=15) 2024-11-21T22:03:17,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742043_1219 (size=303781) 2024-11-21T22:03:17,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742043_1219 (size=303781) 2024-11-21T22:03:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742043_1219 (size=303781) 2024-11-21T22:03:17,333 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:03:17,333 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:03:17,337 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0003_000001 (auth:SIMPLE) from 127.0.0.1:57202 2024-11-21T22:03:17,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000001/launch_container.sh] 2024-11-21T22:03:17,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000001/container_tokens] 2024-11-21T22:03:17,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0003/container_1732226522247_0003_01_000001/sysfs] 2024-11-21T22:03:17,423 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0004_000001 (auth:SIMPLE) from 127.0.0.1:56414 2024-11-21T22:03:18,706 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:03:23,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T22:03:23,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0004_000001 (auth:SIMPLE) from 127.0.0.1:58192 2024-11-21T22:03:23,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742044_1220 (size=349431) 2024-11-21T22:03:23,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742044_1220 (size=349431) 2024-11-21T22:03:23,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742044_1220 (size=349431) 2024-11-21T22:03:25,289 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0004_000001 (auth:SIMPLE) from 127.0.0.1:51520 2024-11-21T22:03:29,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742045_1221 (size=17447) 2024-11-21T22:03:29,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742045_1221 (size=17447) 2024-11-21T22:03:29,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742045_1221 (size=17447) 2024-11-21T22:03:29,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742046_1222 (size=462) 2024-11-21T22:03:29,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742046_1222 (size=462) 2024-11-21T22:03:29,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742046_1222 (size=462) 2024-11-21T22:03:29,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742047_1223 (size=17447) 2024-11-21T22:03:29,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742047_1223 (size=17447) 2024-11-21T22:03:29,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742047_1223 (size=17447) 2024-11-21T22:03:29,134 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000002/launch_container.sh] 2024-11-21T22:03:29,134 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000002/container_tokens] 2024-11-21T22:03:29,134 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000002/sysfs] 2024-11-21T22:03:29,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742048_1224 (size=349431) 2024-11-21T22:03:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742048_1224 (size=349431) 2024-11-21T22:03:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742048_1224 (size=349431) 2024-11-21T22:03:30,303 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:03:30,303 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:03:30,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-21T22:03:30,306 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:03:30,306 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:03:30,306 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T22:03:30,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T22:03:30,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T22:03:30,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@6d27884a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T22:03:30,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T22:03:30,307 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 
2024-11-21T22:03:30,309 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:30,338 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:30,339 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d27884a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T22:03:30,341 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:03:30,347 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T22:03:30,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:30,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:30,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-10839889547228091780.jar 2024-11-21T22:03:31,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-1757385062048614882.jar 2024-11-21T22:03:31,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:31,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:03:31,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:03:31,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:03:31,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:03:31,338 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:03:31,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:03:31,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:03:31,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:03:31,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:03:31,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:03:31,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:03:31,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:31,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:31,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:31,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:31,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:31,340 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:31,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:31,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742049_1225 (size=131440) 2024-11-21T22:03:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742049_1225 (size=131440) 2024-11-21T22:03:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742049_1225 (size=131440) 2024-11-21T22:03:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T22:03:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T22:03:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T22:03:31,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742051_1227 (size=440656) 2024-11-21T22:03:31,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742051_1227 (size=440656) 2024-11-21T22:03:31,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742051_1227 (size=440656) 2024-11-21T22:03:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742052_1228 (size=1323991) 2024-11-21T22:03:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742052_1228 (size=1323991) 2024-11-21T22:03:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742052_1228 (size=1323991) 2024-11-21T22:03:31,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742053_1229 (size=903739) 2024-11-21T22:03:31,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742053_1229 (size=903739) 2024-11-21T22:03:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742053_1229 (size=903739) 2024-11-21T22:03:31,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T22:03:31,455 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T22:03:31,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T22:03:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T22:03:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T22:03:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T22:03:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742056_1232 (size=77835) 2024-11-21T22:03:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742056_1232 (size=77835) 2024-11-21T22:03:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742056_1232 (size=77835) 2024-11-21T22:03:31,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742057_1233 (size=30949) 2024-11-21T22:03:31,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742057_1233 (size=30949) 2024-11-21T22:03:31,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742057_1233 (size=30949) 2024-11-21T22:03:31,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742058_1234 (size=1597347) 2024-11-21T22:03:31,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742058_1234 (size=1597347) 2024-11-21T22:03:31,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742058_1234 (size=1597347) 2024-11-21T22:03:31,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T22:03:31,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T22:03:31,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T22:03:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742060_1236 (size=232957) 2024-11-21T22:03:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742060_1236 (size=232957) 2024-11-21T22:03:31,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742060_1236 (size=232957) 2024-11-21T22:03:31,531 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742061_1237 (size=127628) 2024-11-21T22:03:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742061_1237 (size=127628) 2024-11-21T22:03:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742061_1237 (size=127628) 2024-11-21T22:03:31,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742062_1238 (size=20406) 2024-11-21T22:03:31,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742062_1238 (size=20406) 2024-11-21T22:03:31,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742062_1238 (size=20406) 2024-11-21T22:03:31,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T22:03:31,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T22:03:31,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T22:03:31,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742064_1240 (size=217634) 2024-11-21T22:03:31,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742064_1240 (size=217634) 2024-11-21T22:03:31,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742064_1240 (size=217634) 2024-11-21T22:03:31,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742065_1241 (size=6424742) 2024-11-21T22:03:31,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742065_1241 (size=6424742) 2024-11-21T22:03:31,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742065_1241 (size=6424742) 2024-11-21T22:03:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742066_1242 (size=1832290) 2024-11-21T22:03:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742066_1242 (size=1832290) 2024-11-21T22:03:31,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742066_1242 (size=1832290) 2024-11-21T22:03:31,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742067_1243 (size=322274) 2024-11-21T22:03:31,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742067_1243 (size=322274) 2024-11-21T22:03:31,614 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742067_1243 (size=322274) 2024-11-21T22:03:31,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742068_1244 (size=503880) 2024-11-21T22:03:31,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742068_1244 (size=503880) 2024-11-21T22:03:31,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742068_1244 (size=503880) 2024-11-21T22:03:31,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742069_1245 (size=29229) 2024-11-21T22:03:31,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742069_1245 (size=29229) 2024-11-21T22:03:31,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742069_1245 (size=29229) 2024-11-21T22:03:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742070_1246 (size=24096) 2024-11-21T22:03:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742070_1246 (size=24096) 2024-11-21T22:03:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742070_1246 (size=24096) 2024-11-21T22:03:31,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742071_1247 (size=111872) 2024-11-21T22:03:31,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742071_1247 (size=111872) 2024-11-21T22:03:31,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742071_1247 (size=111872) 2024-11-21T22:03:31,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742072_1248 (size=45609) 2024-11-21T22:03:31,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742072_1248 (size=45609) 2024-11-21T22:03:31,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742072_1248 (size=45609) 2024-11-21T22:03:31,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742073_1249 (size=136454) 2024-11-21T22:03:31,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742073_1249 (size=136454) 2024-11-21T22:03:31,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742073_1249 (size=136454) 2024-11-21T22:03:31,658 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
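The "For class X, using jar Y" DEBUG lines above come from the dependency-jar resolution that TableMapReduceUtil performs when a MapReduce job over HBase is configured; the "No job jar file set" warning is the standard Hadoop notice when the job itself has no jar. A minimal sketch of the client-side setup that triggers that resolution — the mapper, table name, and job name are placeholders, not taken from the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {

  /** Hypothetical no-op mapper, only here so the job has something to configure. */
  static class NoopMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context ctx) {
      // intentionally empty: this sketch only exercises job setup
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jar-sketch"); // placeholder job name

    // initTableMapperJob wires the scan and mapper into the job and, by default,
    // also calls addDependencyJars(job): for each class the job needs (mapper,
    // key/value writables, HBase/ZooKeeper/metrics/OpenTelemetry classes, ...)
    // it locates the containing jar and ships it via the distributed cache.
    // That lookup is what emits the "For class X, using jar Y" DEBUG lines.
    TableMapReduceUtil.initTableMapperJob(
        "some-table",                       // placeholder table name
        new Scan(),
        NoopMapper.class,
        ImmutableBytesWritable.class,
        NullWritable.class,
        job);

    // The same resolution can also be triggered directly:
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```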
2024-11-21T22:03:31,661 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-21T22:03:31,663 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:03:31,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742074_1250 (size=338) 2024-11-21T22:03:31,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742074_1250 (size=338) 2024-11-21T22:03:31,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742074_1250 (size=338) 2024-11-21T22:03:31,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742075_1251 (size=15) 2024-11-21T22:03:31,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742075_1251 (size=15) 2024-11-21T22:03:31,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742075_1251 (size=15) 2024-11-21T22:03:32,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742076_1252 (size=303779) 2024-11-21T22:03:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742076_1252 (size=303779) 2024-11-21T22:03:32,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742076_1252 (size=303779) 2024-11-21T22:03:35,246 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:03:35,246 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:03:35,250 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0004_000001 (auth:SIMPLE) from 127.0.0.1:51888 2024-11-21T22:03:36,122 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0005_000001 (auth:SIMPLE) from 127.0.0.1:45938 2024-11-21T22:03:40,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_3/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000001/launch_container.sh] 2024-11-21T22:03:40,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_3/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000001/container_tokens] 2024-11-21T22:03:40,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_3/usercache/jenkins/appcache/application_1732226522247_0004/container_1732226522247_0004_01_000001/sysfs] 2024-11-21T22:03:41,676 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0005_000001 (auth:SIMPLE) from 127.0.0.1:44360 2024-11-21T22:03:41,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742077_1253 (size=349429) 2024-11-21T22:03:41,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742077_1253 (size=349429) 2024-11-21T22:03:41,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742077_1253 (size=349429) 2024-11-21T22:03:43,882 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0005_000001 (auth:SIMPLE) from 127.0.0.1:45796 2024-11-21T22:03:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742078_1254 (size=16912) 2024-11-21T22:03:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742078_1254 (size=16912) 2024-11-21T22:03:46,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742078_1254 (size=16912) 2024-11-21T22:03:46,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742079_1255 (size=462) 2024-11-21T22:03:46,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742079_1255 (size=462) 2024-11-21T22:03:46,455 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742079_1255 (size=462) 2024-11-21T22:03:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742080_1256 (size=16912) 2024-11-21T22:03:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742080_1256 (size=16912) 2024-11-21T22:03:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742080_1256 (size=16912) 2024-11-21T22:03:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742081_1257 (size=349429) 2024-11-21T22:03:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742081_1257 (size=349429) 2024-11-21T22:03:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742081_1257 (size=349429) 2024-11-21T22:03:46,553 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0005_000001 (auth:SIMPLE) from 127.0.0.1:45802 2024-11-21T22:03:46,558 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000002/launch_container.sh] 2024-11-21T22:03:46,558 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000002/container_tokens] 2024-11-21T22:03:46,558 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000002/sysfs] 2024-11-21T22:03:48,264 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:03:48,265 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
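The export phases logged here — loading the snapshot hfile list, planning the copy splits, running the MapReduce copy job, then "Finalize the Snapshot Export" and verification — are the standard ExportSnapshot tool at work. A minimal sketch of a programmatic invocation, assuming a hypothetical local destination; flag spellings should be checked against the HBase version in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool: it loads the snapshot manifest, plans the
    // copy splits, runs a MapReduce job to copy the store files, then finalizes
    // and verifies the exported snapshot -- the phases logged above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export-sketch",  // hypothetical destination
        "--overwrite"
    });
    System.exit(rc);
  }
}
```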
2024-11-21T22:03:48,267 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-21T22:03:48,267 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:03:48,268 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:03:48,268 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T22:03:48,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T22:03:48,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T22:03:48,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@6d27884a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T22:03:48,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T22:03:48,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226594822/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T22:03:48,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-21T22:03:48,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-21T22:03:48,286 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226628286"}]},"ts":"1732226628286"} 2024-11-21T22:03:48,288 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-21T22:03:48,288 INFO [PEWorker-2 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-21T22:03:48,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-21T22:03:48,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, UNASSIGN}] 2024-11-21T22:03:48,291 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, UNASSIGN 2024-11-21T22:03:48,291 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, UNASSIGN 2024-11-21T22:03:48,292 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=ce84aab708388952511fda968a3634d3, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:48,293 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=81d0565906c05e3981f19964a793f6eb, regionState=CLOSING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:48,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, UNASSIGN because future has completed 2024-11-21T22:03:48,294 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:48,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:48,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, UNASSIGN because future has completed 2024-11-21T22:03:48,294 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:48,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:03:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-21T22:03:48,447 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close ce84aab708388952511fda968a3634d3 2024-11-21T22:03:48,447 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:03:48,447 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing ce84aab708388952511fda968a3634d3, disabling compactions & flushes 2024-11-21T22:03:48,447 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:48,448 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. after waiting 0 ms 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing 81d0565906c05e3981f19964a793f6eb, disabling compactions & flushes 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:48,448 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. after waiting 0 ms 2024-11-21T22:03:48,448 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 
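The disable flow above (DisableTableProcedure, then CloseTableRegionsProcedure, per-region UNASSIGN TransitRegionStateProcedures, and CloseRegionProcedures executed on the region servers) is driven by a single client call; the log confirms the admin client completing "Operation: DISABLE" below. A minimal client-side sketch, assuming a connection to the same cluster:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // disableTable submits a DisableTableProcedure on the master; the client
      // then polls "Checking to see if procedure is done pid=..." (as logged
      // above) until the table reaches DISABLED in hbase:meta.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}
```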
2024-11-21T22:03:48,453 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:03:48,453 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:03:48,454 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:48,454 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:48,454 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb. 2024-11-21T22:03:48,454 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3. 2024-11-21T22:03:48,454 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for ce84aab708388952511fda968a3634d3: Waiting for close lock at 1732226628447Running coprocessor pre-close hooks at 1732226628447Disabling compacts and flushes for region at 1732226628447Disabling writes for close at 1732226628448 (+1 ms)Writing region close event to WAL at 1732226628449 (+1 ms)Running coprocessor post-close hooks at 1732226628453 (+4 ms)Closed at 1732226628454 (+1 ms) 2024-11-21T22:03:48,454 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for 81d0565906c05e3981f19964a793f6eb: Waiting for close lock at 1732226628448Running coprocessor pre-close hooks at 1732226628448Disabling compacts and flushes for region at 1732226628448Disabling writes for close at 1732226628448Writing region close event to WAL at 1732226628449 (+1 ms)Running coprocessor post-close hooks at 1732226628454 (+5 ms)Closed at 1732226628454 2024-11-21T22:03:48,455 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed 81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:48,456 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=81d0565906c05e3981f19964a793f6eb, regionState=CLOSED 2024-11-21T22:03:48,456 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed ce84aab708388952511fda968a3634d3 2024-11-21T22:03:48,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=ce84aab708388952511fda968a3634d3, regionState=CLOSED 2024-11-21T22:03:48,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:03:48,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:48,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-21T22:03:48,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=89 2024-11-21T22:03:48,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure ce84aab708388952511fda968a3634d3, server=2a884b411602,44629,1732226515126 in 165 msec 2024-11-21T22:03:48,461 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 81d0565906c05e3981f19964a793f6eb, server=2a884b411602,36419,1732226515183 in 164 msec 2024-11-21T22:03:48,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=81d0565906c05e3981f19964a793f6eb, UNASSIGN in 171 msec 2024-11-21T22:03:48,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=89, resume processing ppid=88 2024-11-21T22:03:48,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ce84aab708388952511fda968a3634d3, UNASSIGN in 171 msec 2024-11-21T22:03:48,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-21T22:03:48,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 174 msec 2024-11-21T22:03:48,465 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226628465"}]},"ts":"1732226628465"} 2024-11-21T22:03:48,467 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-21T22:03:48,467 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-21T22:03:48,469 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 185 msec 2024-11-21T22:03:48,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-21T22:03:48,601 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T22:03:48,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-21T22:03:48,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,603 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-21T22:03:48,604 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,607 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-21T22:03:48,608 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3 2024-11-21T22:03:48,610 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:48,610 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/recovered.edits] 2024-11-21T22:03:48,611 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/recovered.edits] 2024-11-21T22:03:48,614 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/cf/7d2b973708134a3d80a514ee67d812d3 2024-11-21T22:03:48,615 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6 to 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/cf/141a2ee0402942c0a24a3146168017a6 2024-11-21T22:03:48,618 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3/recovered.edits/9.seqid 2024-11-21T22:03:48,618 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/ce84aab708388952511fda968a3634d3 2024-11-21T22:03:48,619 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb/recovered.edits/9.seqid 2024-11-21T22:03:48,619 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testConsecutiveExports/81d0565906c05e3981f19964a793f6eb 2024-11-21T22:03:48,619 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-21T22:03:48,621 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,624 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-21T22:03:48,627 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-21T22:03:48,629 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,629 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
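The cleanup in this stretch — HFileArchiver moving the region files under the archive directory, the regions and table state being removed from hbase:meta, the ACL znode events, and finally the two snapshot deletions — is what Admin#deleteTable and Admin#deleteSnapshot trigger. A minimal sketch of those calls (the table must already be disabled, as it is here):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // deleteTable runs the DeleteTableProcedure: HFileArchiver moves the region
      // directories into the archive, the region rows and table state are removed
      // from hbase:meta, and the table's ACL entry is cleaned up.
      admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));

      // Snapshots live independently of the table and are removed separately
      // (the "delete name: ..." master RPCs that follow in the log).
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
```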
2024-11-21T22:03:48,629 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226628629"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:48,629 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226628629"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:48,632 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:03:48,632 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ce84aab708388952511fda968a3634d3, NAME => 'testtb-testConsecutiveExports,,1732226593456.ce84aab708388952511fda968a3634d3.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 81d0565906c05e3981f19964a793f6eb, NAME => 'testtb-testConsecutiveExports,1,1732226593456.81d0565906c05e3981f19964a793f6eb.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:03:48,632 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-21T22:03:48,632 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226628632"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:48,634 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-21T22:03:48,635 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T22:03:48,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 33 msec 2024-11-21T22:03:48,861 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,862 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T22:03:48,862 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-11-21T22:03:48,862 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T22:03:48,862 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,979 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T22:03:48,979 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:48,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:48,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-21T22:03:48,980 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-21T22:03:48,980 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T22:03:48,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-21T22:03:48,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-21T22:03:48,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-21T22:03:48,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-21T22:03:49,009 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=791 (was 785) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:41172 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 123650) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37147 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:41826 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3826 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:34960 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1026642716_1 at /127.0.0.1:34928 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:38555 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38555 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 805), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=547 (was 573), ProcessCount=16 (was 19), AvailableMemoryMB=795 (was 525) - AvailableMemoryMB LEAK? 
- 2024-11-21T22:03:49,009 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-21T22:03:49,025 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=791, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=547, ProcessCount=16, AvailableMemoryMB=794 2024-11-21T22:03:49,025 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-21T22:03:49,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:03:49,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:03:49,029 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:49,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-21T22:03:49,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:03:49,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T22:03:49,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742082_1258 (size=422) 2024-11-21T22:03:49,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742082_1258 (size=422) 2024-11-21T22:03:49,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742082_1258 (size=422) 2024-11-21T22:03:49,038 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8d7c6b050c10dcfdb96a9bcda29bc013, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:49,039 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 11e3bba62c6cbb3574419cc6bedd5cee, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742083_1259 (size=83) 2024-11-21T22:03:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742083_1259 (size=83) 2024-11-21T22:03:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742083_1259 (size=83) 2024-11-21T22:03:49,053 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:49,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 11e3bba62c6cbb3574419cc6bedd5cee, disabling compactions & flushes 2024-11-21T22:03:49,054 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 
after waiting 0 ms 2024-11-21T22:03:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742084_1260 (size=83) 2024-11-21T22:03:49,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,054 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742084_1260 (size=83) 2024-11-21T22:03:49,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 11e3bba62c6cbb3574419cc6bedd5cee: Waiting for close lock at 1732226629054Disabling compacts and flushes for region at 1732226629054Disabling writes for close at 1732226629054Writing region close event to WAL at 1732226629054Closed at 1732226629054 2024-11-21T22:03:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742084_1260 (size=83) 2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 8d7c6b050c10dcfdb96a9bcda29bc013, disabling compactions & flushes 2024-11-21T22:03:49,055 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. after waiting 0 ms 2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,055 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
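[Annotation] The CreateTableProcedure entries above (pid=94, states CREATE_TABLE_PRE_OPERATION through CREATE_TABLE_WRITE_FS_LAYOUT) and the two RegionOpenAndInit workers correspond to a client-side table creation for testtb-testExportFileSystemStateWithMergeRegion with a single 'cf' family and one split key ('1'), which is why exactly two regions ([,1) and [1,)) are initialized and immediately closed again while the FS layout is written. A minimal client-side sketch of an equivalent call through the public Admin API is below; the class name and configuration source are illustrative, not taken from the test harness itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml pointing at the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)                       // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                         // VERSIONS => '1'
              .build())
          .build();
      // One split key ('1') yields the two regions seen in the log: [,1) and [1,).
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```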
2024-11-21T22:03:49,055 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8d7c6b050c10dcfdb96a9bcda29bc013: Waiting for close lock at 1732226629055Disabling compacts and flushes for region at 1732226629055Disabling writes for close at 1732226629055Writing region close event to WAL at 1732226629055Closed at 1732226629055 2024-11-21T22:03:49,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:03:49,057 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732226629056"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226629056"}]},"ts":"1732226629056"} 2024-11-21T22:03:49,057 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732226629056"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226629056"}]},"ts":"1732226629056"} 2024-11-21T22:03:49,059 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:03:49,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:03:49,061 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226629060"}]},"ts":"1732226629060"} 2024-11-21T22:03:49,062 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-21T22:03:49,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:03:49,064 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:03:49,064 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:03:49,064 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:03:49,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:03:49,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, ASSIGN}] 2024-11-21T22:03:49,065 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, ASSIGN 2024-11-21T22:03:49,065 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, ASSIGN 2024-11-21T22:03:49,066 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:03:49,066 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:03:49,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T22:03:49,216 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
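[Annotation] The balancer and TransitRegionStateProcedure entries above (pid=95/96, ASSIGN) cover the step that chooses region servers for the two new regions and records the plan in hbase:meta. As a hedged sketch, a client can confirm where the regions ended up through the public RegionLocator API; the table name comes from this log, the rest is illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionAssignments {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(name)) {
      // Each location pairs a region (encoded name, start/end key) with the
      // region server currently hosting it, as recorded in hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```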
2024-11-21T22:03:49,217 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=11e3bba62c6cbb3574419cc6bedd5cee, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:49,217 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=8d7c6b050c10dcfdb96a9bcda29bc013, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:03:49,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, ASSIGN because future has completed 2024-11-21T22:03:49,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:49,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, ASSIGN because future has completed 2024-11-21T22:03:49,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:03:49,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T22:03:49,374 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,374 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => 11e3bba62c6cbb3574419cc6bedd5cee, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. service=AccessControlService 2024-11-21T22:03:49,375 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,375 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 8d7c6b050c10dcfdb96a9bcda29bc013, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:03:49,375 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,376 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. service=AccessControlService 2024-11-21T22:03:49,376 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
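[Annotation] The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries above are what enable the ACL writes and ZK permission-cache updates that appear later in this log. As a rough sketch only (the exact mini-cluster setup used by TestSecureExportSnapshot is not shown here), a secured cluster typically loads the coprocessor and turns on authorization via configuration keys like these:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecurityConfSketch {
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Load the AccessController on master, region servers, and regions so that
    // table-level grants (such as the "jenkins: RWXCA" entry later in the log)
    // are written and enforced.
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}
```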
2024-11-21T22:03:49,376 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,376 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:49,376 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,376 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,377 INFO [StoreOpener-11e3bba62c6cbb3574419cc6bedd5cee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,377 INFO [StoreOpener-8d7c6b050c10dcfdb96a9bcda29bc013-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,378 INFO [StoreOpener-11e3bba62c6cbb3574419cc6bedd5cee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 11e3bba62c6cbb3574419cc6bedd5cee columnFamilyName cf 2024-11-21T22:03:49,378 DEBUG [StoreOpener-11e3bba62c6cbb3574419cc6bedd5cee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:49,378 INFO [StoreOpener-8d7c6b050c10dcfdb96a9bcda29bc013-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d7c6b050c10dcfdb96a9bcda29bc013 columnFamilyName cf 2024-11-21T22:03:49,378 DEBUG [StoreOpener-8d7c6b050c10dcfdb96a9bcda29bc013-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:49,378 INFO [StoreOpener-11e3bba62c6cbb3574419cc6bedd5cee-1 {}] regionserver.HStore(327): Store=11e3bba62c6cbb3574419cc6bedd5cee/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:49,379 INFO [StoreOpener-8d7c6b050c10dcfdb96a9bcda29bc013-1 {}] regionserver.HStore(327): Store=8d7c6b050c10dcfdb96a9bcda29bc013/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:49,379 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,379 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,379 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,380 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up 
temporary data for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,381 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,382 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,383 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:49,384 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:49,384 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 8d7c6b050c10dcfdb96a9bcda29bc013; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61797445, jitterRate=-0.07914631068706512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:49,384 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,384 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened 11e3bba62c6cbb3574419cc6bedd5cee; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69350767, jitterRate=0.03340695798397064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:49,384 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,385 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 8d7c6b050c10dcfdb96a9bcda29bc013: Running coprocessor pre-open hook at 1732226629376Writing region info on filesystem at 1732226629376Initializing all the Stores at 1732226629377 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226629377Cleaning up temporary data from old regions at 1732226629380 (+3 ms)Running coprocessor post-open hooks at 1732226629384 (+4 ms)Region opened successfully at 1732226629384 2024-11-21T22:03:49,385 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for 
11e3bba62c6cbb3574419cc6bedd5cee: Running coprocessor pre-open hook at 1732226629375Writing region info on filesystem at 1732226629375Initializing all the Stores at 1732226629376 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226629376Cleaning up temporary data from old regions at 1732226629380 (+4 ms)Running coprocessor post-open hooks at 1732226629384 (+4 ms)Region opened successfully at 1732226629384 2024-11-21T22:03:49,385 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013., pid=98, masterSystemTime=1732226629372 2024-11-21T22:03:49,386 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee., pid=97, masterSystemTime=1732226629371 2024-11-21T22:03:49,387 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,387 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,388 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=8d7c6b050c10dcfdb96a9bcda29bc013, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:03:49,388 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,388 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 
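[Annotation] The HStore, CompactionConfiguration, and region open-journal entries above report the per-family settings the two regions opened with: block cache on, no compression, no data block encoding, ROW bloom filter, 64 KB blocks, one version. A hedged sketch of expressing those same attributes through ColumnFamilyDescriptorBuilder when defining the family; the values mirror the descriptor printed in this log, while the wrapper class is illustrative.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CfSettingsSketch {
  public static ColumnFamilyDescriptor cf() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)                               // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)               // BLOOMFILTER => 'ROW'
        .setBlocksize(64 * 1024)                         // BLOCKSIZE => '65536 B (64KB)'
        .setBlockCacheEnabled(true)                      // BLOCKCACHE => 'true'
        .setCompressionType(Compression.Algorithm.NONE)  // COMPRESSION => 'NONE'
        .setDataBlockEncoding(DataBlockEncoding.NONE)    // DATA_BLOCK_ENCODING => 'NONE'
        .setInMemory(false)                              // IN_MEMORY => 'false'
        .build();
  }
}
```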
2024-11-21T22:03:49,388 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=11e3bba62c6cbb3574419cc6bedd5cee, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:49,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:03:49,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:49,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=95 2024-11-21T22:03:49,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979 in 170 msec 2024-11-21T22:03:49,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, ASSIGN in 328 msec 2024-11-21T22:03:49,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-21T22:03:49,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126 in 172 msec 2024-11-21T22:03:49,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-21T22:03:49,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, ASSIGN in 329 msec 2024-11-21T22:03:49,395 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:03:49,395 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226629395"}]},"ts":"1732226629395"} 2024-11-21T22:03:49,397 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-21T22:03:49,398 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:03:49,398 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-21T22:03:49,401 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T22:03:49,419 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:49,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:49,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:49,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:49,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:49,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:49,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:49,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:49,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 401 msec 2024-11-21T22:03:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T22:03:49,661 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T22:03:49,662 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-11-21T22:03:49,662 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:49,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. 
Checking AM states. 2024-11-21T22:03:49,668 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:49,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-21T22:03:49,668 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T22:03:49,673 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T22:03:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226629673 (current time:1732226629673). 2024-11-21T22:03:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:03:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-21T22:03:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:03:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2c2ced, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:49,675 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:49,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:49,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:49,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5b4dd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 
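[Annotation] The PermissionStorage(177) "Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA" entry and the ZKPermissionWatcher cache updates above reflect the owner grant the AccessController records for a newly created table; the PBUF payload is the serialized permission set pushed to every watcher. A hedged sketch of issuing an equivalent table-wide grant through the public AccessControlClient helper (user and scope mirror the log entry; the call site itself is illustrative, since in the log the grant is written automatically by the coprocessor):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (null family and qualifier mean table-wide scope).
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```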
2024-11-21T22:03:49,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:49,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,678 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:49,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfd82f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:49,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:49,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:49,684 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:49,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,686 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:03:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fcf0a2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:49,689 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:49,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:49,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:49,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@766dbfd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:49,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:49,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,691 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:49,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a194ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:49,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:49,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:49,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:49,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:49,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:49,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:49,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:49,701 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T22:03:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:03:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T22:03:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-21T22:03:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T22:03:49,716 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:03:49,717 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:03:49,720 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:03:49,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742085_1261 (size=215) 2024-11-21T22:03:49,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742085_1261 (size=215) 2024-11-21T22:03:49,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742085_1261 (size=215) 2024-11-21T22:03:49,729 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:03:49,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee}] 2024-11-21T22:03:49,731 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,731 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,733 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=2a884b411602,44629,1732226515126, table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T22:03:49,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T22:03:49,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-21T22:03:49,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-21T22:03:49,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:49,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 8d7c6b050c10dcfdb96a9bcda29bc013: 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 11e3bba62c6cbb3574419cc6bedd5cee: 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:03:49,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:03:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742086_1262 (size=86) 2024-11-21T22:03:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742086_1262 (size=86) 2024-11-21T22:03:49,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742087_1263 (size=86) 2024-11-21T22:03:49,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742086_1262 (size=86) 2024-11-21T22:03:49,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742087_1263 (size=86) 2024-11-21T22:03:49,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742087_1263 (size=86) 2024-11-21T22:03:49,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
2024-11-21T22:03:49,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-21T22:03:49,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-21T22:03:49,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,895 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:49,897 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 in 166 msec 2024-11-21T22:03:49,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:49,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-21T22:03:49,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-21T22:03:49,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,899 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:49,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=101, resume processing ppid=99 2024-11-21T22:03:49,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee in 171 msec 2024-11-21T22:03:49,901 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:03:49,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:03:49,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:03:49,903 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,903 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742088_1264 (size=597) 2024-11-21T22:03:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742088_1264 (size=597) 2024-11-21T22:03:49,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742088_1264 (size=597) 2024-11-21T22:03:49,912 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:03:49,917 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:03:49,917 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:49,919 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:03:49,919 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-21T22:03:49,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 217 msec 2024-11-21T22:03:50,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T22:03:50,022 
INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T22:03:50,029 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0ea8040acf234e38c7148390eaff81af6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:03:50,031 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='1fbddf37a660bd8914894e73ed0e2bb08', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:03:50,031 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2f75258f503733106148cf0c9de05ccae', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:03:50,033 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3ba03ab1e0cd1bfe07cfbf513df1465cd', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:03:50,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:03:50,038 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:03:50,039 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T22:03:50,042 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,042 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
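The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") come from client puts issued with write-ahead logging skipped. A hedged sketch of such a write; the row key and value are made up, while the cf:q column matches the keys seen later in the HFile writer entries:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));   // hypothetical row key
      // Column family "cf", qualifier "q" match the cf:q keys in the flush output.
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what produces the server-side warning that data may be lost
      // in the event of a crash.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}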
2024-11-21T22:03:50,042 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:03:50,044 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T22:03:50,048 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T22:03:50,053 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T22:03:50,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T22:03:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226630055 (current time:1732226630055). 2024-11-21T22:03:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:03:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-21T22:03:50,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:03:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@749069a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:50,056 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b563bb, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:50,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,058 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:50,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b3976d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:50,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:50,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:50,060 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:50,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:03:50,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:50,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,062 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:50,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45cf2320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:50,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:50,064 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:50,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:50,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:50,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31757993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:50,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:50,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,065 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:50,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a2f1f4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:50,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:50,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:50,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:50,068 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:50,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:50,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:50,072 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:50,073 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:03:50,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:50,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:50,074 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:50,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T22:03:50,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
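The PermissionStorage(613) entry "Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]" is the table permission record that writeAclToSnapshotDescription copies into the snapshot description. A sketch of how such an RWXCA grant is typically issued, assuming the AccessController coprocessor is enabled as it is in this test cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclSketch {
  // AccessControlClient.grant is declared to throw Throwable, hence the signature.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Table-level grant (family and qualifier null) of READ, WRITE, EXEC,
      // CREATE and ADMIN -- the "RWXCA" entry shown for user jenkins in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}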
2024-11-21T22:03:50,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T22:03:50,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-21T22:03:50,078 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:03:50,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T22:03:50,079 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:03:50,082 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:03:50,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742089_1265 (size=210) 2024-11-21T22:03:50,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742089_1265 (size=210) 2024-11-21T22:03:50,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742089_1265 (size=210) 2024-11-21T22:03:50,088 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:03:50,088 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee}] 2024-11-21T22:03:50,089 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:50,089 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:50,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T22:03:50,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-21T22:03:50,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-21T22:03:50,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:03:50,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:03:50,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 8d7c6b050c10dcfdb96a9bcda29bc013 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-21T22:03:50,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 11e3bba62c6cbb3574419cc6bedd5cee 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-21T22:03:50,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/.tmp/cf/f13e2c11cb3c4b838027b9c2947187f8 is 71, key is 01114bfd804a8ef16a3215a2783074a7/cf:q/1732226630036/Put/seqid=0 2024-11-21T22:03:50,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/.tmp/cf/3a5e0b7732e74c0bb8d83741ae34b65f is 71, key is 10e6197ffca23bca0fe668355b0c922a/cf:q/1732226630037/Put/seqid=0 2024-11-21T22:03:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742091_1267 (size=8052) 2024-11-21T22:03:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742090_1266 (size=5568) 2024-11-21T22:03:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742090_1266 (size=5568) 2024-11-21T22:03:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742091_1267 (size=8052) 
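The HRegion(2902) "Flushing ... 1/1 column families" entries above show the FLUSH-type snapshot forcing each region's memstore out to HFiles before references are taken. Purely for illustration (here the flush is driven by the snapshot procedure itself, not by a client call), the same memstore-to-HFile path can be triggered explicitly through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Writes each region's memstore out as HFiles under
      // data/default/<table>/<region>/cf/, like the .tmp/cf files in the log.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}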
2024-11-21T22:03:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742090_1266 (size=5568) 2024-11-21T22:03:50,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742091_1267 (size=8052) 2024-11-21T22:03:50,271 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/.tmp/cf/f13e2c11cb3c4b838027b9c2947187f8 2024-11-21T22:03:50,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/.tmp/cf/3a5e0b7732e74c0bb8d83741ae34b65f 2024-11-21T22:03:50,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/.tmp/cf/3a5e0b7732e74c0bb8d83741ae34b65f as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f 2024-11-21T22:03:50,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/.tmp/cf/f13e2c11cb3c4b838027b9c2947187f8 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8 2024-11-21T22:03:50,287 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8, entries=7, sequenceid=6, filesize=5.4 K 2024-11-21T22:03:50,287 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f, entries=43, sequenceid=6, filesize=7.9 K 2024-11-21T22:03:50,289 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 
KB/1248, currentSize=0 B/0 for 8d7c6b050c10dcfdb96a9bcda29bc013 in 48ms, sequenceid=6, compaction requested=false 2024-11-21T22:03:50,289 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 11e3bba62c6cbb3574419cc6bedd5cee in 48ms, sequenceid=6, compaction requested=false 2024-11-21T22:03:50,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-21T22:03:50,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for 11e3bba62c6cbb3574419cc6bedd5cee: 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 8d7c6b050c10dcfdb96a9bcda29bc013: 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8] hfiles 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f] hfiles 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742093_1269 (size=125) 2024-11-21T22:03:50,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742092_1268 (size=125) 2024-11-21T22:03:50,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 
2024-11-21T22:03:50,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-21T22:03:50,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742092_1268 (size=125) 2024-11-21T22:03:50,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742092_1268 (size=125) 2024-11-21T22:03:50,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742093_1269 (size=125) 2024-11-21T22:03:50,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742093_1269 (size=125) 2024-11-21T22:03:50,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-21T22:03:50,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:50,314 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:03:50,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
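Both SnapshotRegionCallable executions (pid=103 and pid=104) have now flushed their regions, written their snapshot references, and reported back to the master; below, the parent SnapshotProcedure (pid=102) consolidates and completes the snapshot. For reference, a FLUSH-type snapshot such as snaptb0-testExportFileSystemStateWithMergeRegion is typically requested from the client roughly as in the sketch below; the snapshot and table names are taken from the log, everything else is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Connection setup is an assumption; names and type=FLUSH come from the log.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          // type=FLUSH matches "type=FLUSH" in the SnapshotProcedure entries: each region is
          // flushed first, then its store files are referenced in the snapshot manifest.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithMergeRegion", table, SnapshotType.FLUSH));
        }
      }
    }

The blocking Admin.snapshot() call returns once the master-side SnapshotProcedure finishes, which corresponds to the "Operation: SNAPSHOT ... completed" entry logged further down.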
2024-11-21T22:03:50,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-21T22:03:50,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-21T22:03:50,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:50,316 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:03:50,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee in 227 msec 2024-11-21T22:03:50,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-21T22:03:50,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013 in 229 msec 2024-11-21T22:03:50,318 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:03:50,319 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:03:50,320 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:03:50,320 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,320 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742094_1270 (size=675) 2024-11-21T22:03:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742094_1270 (size=675) 2024-11-21T22:03:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742094_1270 (size=675) 2024-11-21T22:03:50,332 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:03:50,337 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:03:50,337 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:50,339 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:03:50,339 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-21T22:03:50,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 264 msec 2024-11-21T22:03:50,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T22:03:50,392 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T22:03:50,411 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T22:03:50,412 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T22:03:50,413 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T22:03:50,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T22:03:50,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T22:03:50,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T22:03:50,414 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T22:03:50,414 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36419 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T22:03:50,414 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T22:03:50,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:03:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:50,418 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:03:50,418 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:50,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-21T22:03:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T22:03:50,419 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:03:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742095_1271 (size=399) 2024-11-21T22:03:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742095_1271 (size=399) 2024-11-21T22:03:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742095_1271 (size=399) 2024-11-21T22:03:50,428 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c195f33e03a4585450be8f5d5733a1fe, NAME => 
'testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:50,428 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 903fe2287b2a9b8f8e279126f6f21af5, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742097_1273 (size=85) 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742097_1273 (size=85) 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742096_1272 (size=85) 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742097_1273 (size=85) 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742096_1272 (size=85) 2024-11-21T22:03:50,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742096_1272 (size=85) 2024-11-21T22:03:50,438 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 903fe2287b2a9b8f8e279126f6f21af5, disabling compactions & flushes 2024-11-21T22:03:50,439 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 
2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. after waiting 0 ms 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,439 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 903fe2287b2a9b8f8e279126f6f21af5: Waiting for close lock at 1732226630439Disabling compacts and flushes for region at 1732226630439Disabling writes for close at 1732226630439Writing region close event to WAL at 1732226630439Closed at 1732226630439 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing c195f33e03a4585450be8f5d5733a1fe, disabling compactions & flushes 2024-11-21T22:03:50,439 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. after waiting 0 ms 2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,439 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 
2024-11-21T22:03:50,439 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for c195f33e03a4585450be8f5d5733a1fe: Waiting for close lock at 1732226630439Disabling compacts and flushes for region at 1732226630439Disabling writes for close at 1732226630439Writing region close event to WAL at 1732226630439Closed at 1732226630439 2024-11-21T22:03:50,440 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:03:50,440 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732226630440"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226630440"}]},"ts":"1732226630440"} 2024-11-21T22:03:50,441 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732226630440"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226630440"}]},"ts":"1732226630440"} 2024-11-21T22:03:50,443 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:03:50,443 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:03:50,444 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226630443"}]},"ts":"1732226630443"} 2024-11-21T22:03:50,445 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-21T22:03:50,446 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:03:50,447 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:03:50,447 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:03:50,447 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:03:50,447 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:03:50,447 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, ASSIGN}] 2024-11-21T22:03:50,449 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, ASSIGN 2024-11-21T22:03:50,449 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, ASSIGN 2024-11-21T22:03:50,449 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:03:50,449 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:03:50,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T22:03:50,600 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
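The CREATE_TABLE_* states above (pid=105) lay out testtb-testExportFileSystemStateWithMergeRegion-1 with a single 'cf' family (VERSIONS => '1') and two regions split at row key '2', matching the RegionOpenAndInit entries with STARTKEY ''/ENDKEY '2' and STARTKEY '2'/ENDKEY ''. A client call that produces this layout looks roughly like the sketch below; the table name, family, version count, and split key are taken from the log, the rest is an assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        // Connection setup is an assumption; descriptor values mirror the log above.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)              // VERSIONS => '1'
                  .build())
              .build();
          // One explicit split key ('2') yields the two regions that the
          // TransitRegionStateProcedures pid=106/107 assign to region servers.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
        }
      }
    }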
2024-11-21T22:03:50,601 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=903fe2287b2a9b8f8e279126f6f21af5, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:50,601 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=c195f33e03a4585450be8f5d5733a1fe, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:50,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, ASSIGN because future has completed 2024-11-21T22:03:50,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:03:50,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, ASSIGN because future has completed 2024-11-21T22:03:50,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:50,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T22:03:50,763 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,763 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,763 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => 903fe2287b2a9b8f8e279126f6f21af5, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.', STARTKEY => '2', ENDKEY => ''} 2024-11-21T22:03:50,763 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => c195f33e03a4585450be8f5d5733a1fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.', STARTKEY => '', ENDKEY => '2'} 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 
service=AccessControlService 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. service=AccessControlService 2024-11-21T22:03:50,764 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:03:50,764 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,764 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,766 INFO [StoreOpener-903fe2287b2a9b8f8e279126f6f21af5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,766 INFO [StoreOpener-c195f33e03a4585450be8f5d5733a1fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,767 INFO [StoreOpener-903fe2287b2a9b8f8e279126f6f21af5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 903fe2287b2a9b8f8e279126f6f21af5 columnFamilyName cf 2024-11-21T22:03:50,767 INFO [StoreOpener-c195f33e03a4585450be8f5d5733a1fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c195f33e03a4585450be8f5d5733a1fe columnFamilyName cf 2024-11-21T22:03:50,767 DEBUG [StoreOpener-903fe2287b2a9b8f8e279126f6f21af5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:50,767 DEBUG [StoreOpener-c195f33e03a4585450be8f5d5733a1fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:50,768 INFO [StoreOpener-c195f33e03a4585450be8f5d5733a1fe-1 {}] regionserver.HStore(327): Store=c195f33e03a4585450be8f5d5733a1fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:50,768 INFO [StoreOpener-903fe2287b2a9b8f8e279126f6f21af5-1 {}] regionserver.HStore(327): Store=903fe2287b2a9b8f8e279126f6f21af5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:50,768 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,768 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,768 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,768 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,769 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,771 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,771 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,773 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:50,773 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:03:50,773 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened 903fe2287b2a9b8f8e279126f6f21af5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71404866, jitterRate=0.06401541829109192}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:50,773 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:50,774 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for 903fe2287b2a9b8f8e279126f6f21af5: Running coprocessor pre-open hook at 1732226630764Writing region info on filesystem at 1732226630764Initializing all the Stores at 1732226630765 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226630765Cleaning up temporary data from old regions at 1732226630769 (+4 ms)Running coprocessor post-open hooks at 1732226630773 (+4 ms)Region opened successfully at 1732226630774 (+1 ms) 2024-11-21T22:03:50,774 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened c195f33e03a4585450be8f5d5733a1fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61862716, jitterRate=-0.0781736969947815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:50,774 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:50,775 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for c195f33e03a4585450be8f5d5733a1fe: Running coprocessor pre-open hook at 1732226630764Writing region info on filesystem at 1732226630764Initializing all the Stores at 1732226630765 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226630765Cleaning up temporary data from old regions at 1732226630769 (+4 ms)Running coprocessor post-open hooks at 1732226630774 (+5 ms)Region opened successfully at 1732226630774 2024-11-21T22:03:50,775 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5., pid=108, masterSystemTime=1732226630759 2024-11-21T22:03:50,775 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe., pid=109, masterSystemTime=1732226630761 2024-11-21T22:03:50,777 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,777 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:50,778 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=903fe2287b2a9b8f8e279126f6f21af5, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:50,783 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,784 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:50,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:03:50,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=c195f33e03a4585450be8f5d5733a1fe, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:50,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:50,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-21T22:03:50,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183 in 179 msec 2024-11-21T22:03:50,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=106 2024-11-21T22:03:50,794 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, ASSIGN in 346 msec 2024-11-21T22:03:50,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126 in 182 msec 2024-11-21T22:03:50,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=105 2024-11-21T22:03:50,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, ASSIGN in 347 msec 2024-11-21T22:03:50,797 
INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:03:50,797 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226630797"}]},"ts":"1732226630797"} 2024-11-21T22:03:50,799 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-21T22:03:50,800 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:03:50,800 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-21T22:03:50,803 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-21T22:03:50,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:50,860 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:50,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:50,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T22:03:50,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 453 msec 2024-11-21T22:03:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T22:03:51,051 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T22:03:51,053 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:03:51,057 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:51,059 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-21T22:03:51,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5] 2024-11-21T22:03:51,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5], force=true 2024-11-21T22:03:51,080 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5], force=true 2024-11-21T22:03:51,080 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5], force=true 2024-11-21T22:03:51,080 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5], force=true 2024-11-21T22:03:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T22:03:51,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, UNASSIGN}] 2024-11-21T22:03:51,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, UNASSIGN 2024-11-21T22:03:51,095 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, UNASSIGN 2024-11-21T22:03:51,096 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=c195f33e03a4585450be8f5d5733a1fe, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:51,096 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=903fe2287b2a9b8f8e279126f6f21af5, regionState=CLOSING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:03:51,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, UNASSIGN because future has completed 2024-11-21T22:03:51,098 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:51,098 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:51,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, UNASSIGN because future has completed 2024-11-21T22:03:51,099 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-21T22:03:51,099 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:03:51,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T22:03:51,251 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:51,251 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-21T22:03:51,251 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing c195f33e03a4585450be8f5d5733a1fe, disabling compactions & flushes 2024-11-21T22:03:51,251 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:51,251 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 2024-11-21T22:03:51,251 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. after waiting 0 ms 2024-11-21T22:03:51,251 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 
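The merge flow above is triggered by a client RPC to the master (master.HMaster$2(2278): merge regions [c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5]), which the master turns into MergeTableRegionsProcedure pid=110 with unassign subprocedures. As an illustration only, a minimal client-side sketch that would issue the same kind of request through the public Admin API is shown below; the encoded region names are taken from the log, while the class name, connection setup and error handling are assumptions of the sketch, not code the test runs.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MergeRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Encoded region names as they appear in the MergeTableRegionsProcedure log line.
          byte[][] regionsToMerge = new byte[][] {
              Bytes.toBytes("c195f33e03a4585450be8f5d5733a1fe"),
              Bytes.toBytes("903fe2287b2a9b8f8e279126f6f21af5")
          };
          // force=true in the log corresponds to the 'forcible' flag (merge non-adjacent regions).
          admin.mergeRegionsAsync(regionsToMerge, true).get();
        }
      }
    }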
2024-11-21T22:03:51,251 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing c195f33e03a4585450be8f5d5733a1fe 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-21T22:03:51,252 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:51,252 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-21T22:03:51,252 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing 903fe2287b2a9b8f8e279126f6f21af5, disabling compactions & flushes 2024-11-21T22:03:51,252 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:51,252 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:51,252 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. after waiting 0 ms 2024-11-21T22:03:51,252 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 
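The "Flushing ... 1/1 column families, dataSize=24 B" lines show each source region flushing its memstore as part of the close that precedes the merge; that flush happens automatically on close. The closest client-visible equivalent is an explicit table flush; a minimal sketch under the assumption of a standard HBase client classpath and configuration (again, not something the test itself executes):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush all memstores of the table; each region writes its memstore out as an HFile,
          // just as the close path in the log does before the merge can proceed.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }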
2024-11-21T22:03:51,252 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing 903fe2287b2a9b8f8e279126f6f21af5 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-21T22:03:51,275 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/.tmp/cf/cd1b699ca4784e6ca0bd670955d311b5 is 28, key is 1/cf:/1732226631054/Put/seqid=0 2024-11-21T22:03:51,276 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/.tmp/cf/fec05b66213340f19530017c9cf0c9be is 28, key is 2/cf:/1732226631058/Put/seqid=0 2024-11-21T22:03:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742098_1274 (size=4945) 2024-11-21T22:03:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742098_1274 (size=4945) 2024-11-21T22:03:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742098_1274 (size=4945) 2024-11-21T22:03:51,289 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/.tmp/cf/cd1b699ca4784e6ca0bd670955d311b5 2024-11-21T22:03:51,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/.tmp/cf/cd1b699ca4784e6ca0bd670955d311b5 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5 2024-11-21T22:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742099_1275 (size=4945) 2024-11-21T22:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742099_1275 (size=4945) 2024-11-21T22:03:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742099_1275 (size=4945) 2024-11-21T22:03:51,297 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/.tmp/cf/fec05b66213340f19530017c9cf0c9be 2024-11-21T22:03:51,303 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/.tmp/cf/fec05b66213340f19530017c9cf0c9be as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be 2024-11-21T22:03:51,306 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5, entries=1, sequenceid=5, filesize=4.8 K 2024-11-21T22:03:51,307 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c195f33e03a4585450be8f5d5733a1fe in 56ms, sequenceid=5, compaction requested=false 2024-11-21T22:03:51,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-21T22:03:51,310 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be, entries=1, sequenceid=5, filesize=4.8 K 2024-11-21T22:03:51,313 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:03:51,313 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:51,314 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. 
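The flush above committed one ~4.8 K HFile per region (cd1b699c... and fec05b66...) and wrote a recovered.edits/8.seqid marker before closing. To inspect those store files directly one could list the column-family directory on HDFS; a small sketch follows, where the NameNode address and test-data path are copied from the log and are specific to this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40237"); // NameNode from the log; cluster-specific
        // Column-family directory of the region that was just flushed and closed.
        Path cfDir = new Path("/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/"
            + "data/default/testtb-testExportFileSystemStateWithMergeRegion-1/"
            + "c195f33e03a4585450be8f5d5733a1fe/cf");
        try (FileSystem fs = FileSystem.get(conf)) {
          for (FileStatus st : fs.listStatus(cfDir)) {
            // Expect one ~4.8 KB HFile (cd1b699c...) according to the flush log above.
            System.out.println(st.getPath().getName() + " " + st.getLen() + " bytes");
          }
        }
      }
    }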
2024-11-21T22:03:51,314 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for c195f33e03a4585450be8f5d5733a1fe: Waiting for close lock at 1732226631251Running coprocessor pre-close hooks at 1732226631251Disabling compacts and flushes for region at 1732226631251Disabling writes for close at 1732226631251Obtaining lock to block concurrent updates at 1732226631251Preparing flush snapshotting stores in c195f33e03a4585450be8f5d5733a1fe at 1732226631251Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732226631252 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe. at 1732226631253 (+1 ms)Flushing c195f33e03a4585450be8f5d5733a1fe/cf: creating writer at 1732226631253Flushing c195f33e03a4585450be8f5d5733a1fe/cf: appending metadata at 1732226631274 (+21 ms)Flushing c195f33e03a4585450be8f5d5733a1fe/cf: closing flushed file at 1732226631275 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a2dac36: reopening flushed file at 1732226631295 (+20 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c195f33e03a4585450be8f5d5733a1fe in 56ms, sequenceid=5, compaction requested=false at 1732226631307 (+12 ms)Writing region close event to WAL at 1732226631309 (+2 ms)Running coprocessor post-close hooks at 1732226631313 (+4 ms)Closed at 1732226631313 2024-11-21T22:03:51,316 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:03:51,316 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=c195f33e03a4585450be8f5d5733a1fe, regionState=CLOSED 2024-11-21T22:03:51,318 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 903fe2287b2a9b8f8e279126f6f21af5 in 66ms, sequenceid=5, compaction requested=false 2024-11-21T22:03:51,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:51,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=111 2024-11-21T22:03:51,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure c195f33e03a4585450be8f5d5733a1fe, server=2a884b411602,44629,1732226515126 in 223 msec 2024-11-21T22:03:51,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c195f33e03a4585450be8f5d5733a1fe, UNASSIGN in 229 msec 2024-11-21T22:03:51,327 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:03:51,328 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:03:51,328 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. 2024-11-21T22:03:51,328 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for 903fe2287b2a9b8f8e279126f6f21af5: Waiting for close lock at 1732226631252Running coprocessor pre-close hooks at 1732226631252Disabling compacts and flushes for region at 1732226631252Disabling writes for close at 1732226631252Obtaining lock to block concurrent updates at 1732226631252Preparing flush snapshotting stores in 903fe2287b2a9b8f8e279126f6f21af5 at 1732226631252Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732226631252Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5. at 1732226631253 (+1 ms)Flushing 903fe2287b2a9b8f8e279126f6f21af5/cf: creating writer at 1732226631253Flushing 903fe2287b2a9b8f8e279126f6f21af5/cf: appending metadata at 1732226631275 (+22 ms)Flushing 903fe2287b2a9b8f8e279126f6f21af5/cf: closing flushed file at 1732226631275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35b6bcf: reopening flushed file at 1732226631302 (+27 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 903fe2287b2a9b8f8e279126f6f21af5 in 66ms, sequenceid=5, compaction requested=false at 1732226631318 (+16 ms)Writing region close event to WAL at 1732226631319 (+1 ms)Running coprocessor post-close hooks at 1732226631328 (+9 ms)Closed at 1732226631328 2024-11-21T22:03:51,330 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed 903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:03:51,331 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=903fe2287b2a9b8f8e279126f6f21af5, regionState=CLOSED 2024-11-21T22:03:51,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:03:51,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=112 2024-11-21T22:03:51,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 903fe2287b2a9b8f8e279126f6f21af5, server=2a884b411602,36419,1732226515183 in 234 msec 2024-11-21T22:03:51,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume 
processing ppid=110 2024-11-21T22:03:51,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=903fe2287b2a9b8f8e279126f6f21af5, UNASSIGN in 241 msec 2024-11-21T22:03:51,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742100_1276 (size=84) 2024-11-21T22:03:51,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742100_1276 (size=84) 2024-11-21T22:03:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742100_1276 (size=84) 2024-11-21T22:03:51,351 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742101_1277 (size=20) 2024-11-21T22:03:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742101_1277 (size=20) 2024-11-21T22:03:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742101_1277 (size=20) 2024-11-21T22:03:51,363 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:51,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742102_1278 (size=21) 2024-11-21T22:03:51,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742102_1278 (size=21) 2024-11-21T22:03:51,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742102_1278 (size=21) 2024-11-21T22:03:51,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742103_1279 (size=84) 2024-11-21T22:03:51,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742103_1279 (size=84) 2024-11-21T22:03:51,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742103_1279 (size=84) 2024-11-21T22:03:51,376 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:51,387 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-21T22:03:51,389 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630416.c195f33e03a4585450be8f5d5733a1fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:51,389 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732226630416.903fe2287b2a9b8f8e279126f6f21af5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:51,390 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T22:03:51,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T22:03:51,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, ASSIGN}] 2024-11-21T22:03:51,407 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, ASSIGN 2024-11-21T22:03:51,408 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, ASSIGN; state=MERGED, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:03:51,558 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
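At this point both source regions are closed and deleted from hbase:meta, and the merged region e8c8a757ef6be4acb9d13d01717f47b1 is being assigned. A quick way to confirm the post-merge layout from a client is to list the table's regions; a hedged sketch (class name and connection setup are assumptions, the expected single region is inferred from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class VerifyMergeSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // After MergeTableRegionsProcedure pid=110 finishes, the table should be served by the
          // single merged region (e8c8a757... in the log) instead of the two original ones.
          for (RegionInfo region : admin.getRegions(table)) {
            System.out.println(region.getEncodedName() + " " + region.getRegionNameAsString());
          }
        }
      }
    }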
2024-11-21T22:03:51,559 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=e8c8a757ef6be4acb9d13d01717f47b1, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:51,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, ASSIGN because future has completed 2024-11-21T22:03:51,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:03:51,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T22:03:51,716 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:03:51,716 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => e8c8a757ef6be4acb9d13d01717f47b1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.', STARTKEY => '', ENDKEY => ''} 2024-11-21T22:03:51,716 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. service=AccessControlService 2024-11-21T22:03:51,716 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
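The AccessControlService coprocessor registered here enforces the table ACL written earlier ("jenkins: RWXCA"). The test writes that entry through the master's post-create hook rather than a client call, but for reference an equivalent grant issued from a client would look roughly like the following sketch (illustrative only; not the code path the log shows):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantAclSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN for user "jenkins" on the whole table.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }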
2024-11-21T22:03:51,716 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,716 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:03:51,717 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,717 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,718 INFO [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,719 INFO [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8c8a757ef6be4acb9d13d01717f47b1 columnFamilyName cf 2024-11-21T22:03:51,719 DEBUG [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:03:51,747 DEBUG [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/cd1b699ca4784e6ca0bd670955d311b5.c195f33e03a4585450be8f5d5733a1fe->hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5-top 2024-11-21T22:03:51,753 DEBUG [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/fec05b66213340f19530017c9cf0c9be.903fe2287b2a9b8f8e279126f6f21af5->hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be-top 2024-11-21T22:03:51,754 INFO [StoreOpener-e8c8a757ef6be4acb9d13d01717f47b1-1 {}] regionserver.HStore(327): Store=e8c8a757ef6be4acb9d13d01717f47b1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:03:51,754 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,755 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,756 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,757 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,757 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,759 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,760 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened e8c8a757ef6be4acb9d13d01717f47b1; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71800433, jitterRate=0.06990982592105865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:03:51,760 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:51,761 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for e8c8a757ef6be4acb9d13d01717f47b1: Running coprocessor pre-open hook at 1732226631717Writing region info on filesystem at 1732226631717Initializing all the Stores at 1732226631718 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226631718Cleaning up temporary data from old regions at 1732226631757 (+39 ms)Running coprocessor post-open hooks at 1732226631760 (+3 ms)Region opened successfully at 1732226631761 (+1 ms) 2024-11-21T22:03:51,762 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1., pid=116, masterSystemTime=1732226631713 2024-11-21T22:03:51,762 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.,because compaction is disabled. 2024-11-21T22:03:51,765 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:03:51,765 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:03:51,766 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=e8c8a757ef6be4acb9d13d01717f47b1, regionState=OPEN, openSeqNum=9, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:03:51,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:03:51,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-21T22:03:51,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126 in 208 msec 2024-11-21T22:03:51,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-21T22:03:51,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, ASSIGN in 366 msec 2024-11-21T22:03:51,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c195f33e03a4585450be8f5d5733a1fe, 903fe2287b2a9b8f8e279126f6f21af5], force=true in 698 msec 2024-11-21T22:03:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T22:03:52,222 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T22:03:52,222 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-21T22:03:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226632222 (current time:1732226632222). 2024-11-21T22:03:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:03:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-21T22:03:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:03:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@164e79a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:52,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:52,224 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:52,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:52,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:52,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fffd335, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:52,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:52,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,227 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:52,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d83ec3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:52,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:52,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:52,230 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:52,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,233 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
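The snapshot request being validated here ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }) corresponds to a plain Admin snapshot call against the merged table. A minimal sketch of such a call, with the snapshot and table names taken from the log and everything else assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Matches the request in the log: type=FLUSH, default TTL.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
              SnapshotType.FLUSH);
        }
      }
    }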
2024-11-21T22:03:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20129145, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:03:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:03:52,242 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:03:52,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:03:52,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:03:52,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44b87fe6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:03:52,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:03:52,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,244 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37678, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:03:52,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42052f4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:03:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:03:52,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:03:52,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:52,248 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52854, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:03:52,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:03:52,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:03:52,252 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:03:52,254 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:03:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:03:52,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:03:52,254 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:03:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-21T22:03:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:03:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-21T22:03:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-21T22:03:52,257 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:03:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T22:03:52,258 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:03:52,261 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:03:52,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742104_1280 (size=216) 2024-11-21T22:03:52,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742104_1280 (size=216) 2024-11-21T22:03:52,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742104_1280 (size=216) 2024-11-21T22:03:52,275 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:03:52,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1}] 2024-11-21T22:03:52,276 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T22:03:52,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-21T22:03:52,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:03:52,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for e8c8a757ef6be4acb9d13d01717f47b1: 2024-11-21T22:03:52,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-21T22:03:52,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:03:52,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/cd1b699ca4784e6ca0bd670955d311b5.c195f33e03a4585450be8f5d5733a1fe->hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5-top, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/fec05b66213340f19530017c9cf0c9be.903fe2287b2a9b8f8e279126f6f21af5->hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be-top] hfiles 2024-11-21T22:03:52,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/cd1b699ca4784e6ca0bd670955d311b5.c195f33e03a4585450be8f5d5733a1fe for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/fec05b66213340f19530017c9cf0c9be.903fe2287b2a9b8f8e279126f6f21af5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742105_1281 (size=269) 2024-11-21T22:03:52,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742105_1281 (size=269) 2024-11-21T22:03:52,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 
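The entries above trace the master-side SnapshotProcedure (pid=117) fanning out a per-region SnapshotRegionProcedure (pid=118), which records the region info and hfile references (including the "-top" references inherited from the merged parent regions) into the snapshot manifest. For orientation, a minimal sketch of requesting this kind of snapshot through the stock client Admin API; this is only an illustration, assuming a reachable cluster configuration on the classpath, and the table and snapshot names simply mirror the ones in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Names mirror the test log; substitute your own table/snapshot names.
          TableName table =
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Blocks until the master-side SnapshotProcedure completes
          // (SNAPSHOT_PREPARE ... SNAPSHOT_COMPLETE_SNAPSHOT, as logged above).
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1", table);
        }
      }
    }

Once the procedure finishes, the snapshot lives under the cluster's .hbase-snapshot directory, which is what the export step later in this log copies out.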
2024-11-21T22:03:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742105_1281 (size=269) 2024-11-21T22:03:52,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-21T22:03:52,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-21T22:03:52,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:52,453 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:03:52,458 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-21T22:03:52,458 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:03:52,458 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1 in 180 msec 2024-11-21T22:03:52,460 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:03:52,460 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:03:52,461 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,461 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742106_1282 (size=670) 2024-11-21T22:03:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742106_1282 (size=670) 2024-11-21T22:03:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742106_1282 (size=670) 2024-11-21T22:03:52,497 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:03:52,515 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:03:52,516 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,518 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:03:52,519 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-21T22:03:52,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 264 msec 2024-11-21T22:03:52,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T22:03:52,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T22:03:52,571 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571 2024-11-21T22:03:52,572 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:52,619 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:03:52,619 DEBUG [Time-limited 
test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,627 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:03:52,638 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:52,685 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0005_000001 (auth:SIMPLE) from 127.0.0.1:59382 2024-11-21T22:03:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742107_1283 (size=216) 2024-11-21T22:03:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742107_1283 (size=216) 2024-11-21T22:03:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742107_1283 (size=216) 2024-11-21T22:03:52,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000001/launch_container.sh] 2024-11-21T22:03:52,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000001/container_tokens] 2024-11-21T22:03:52,695 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0005/container_1732226522247_0005_01_000001/sysfs] 2024-11-21T22:03:52,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742108_1284 (size=670) 2024-11-21T22:03:52,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742108_1284 (size=670) 2024-11-21T22:03:52,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is 
added to blk_1073742108_1284 (size=670) 2024-11-21T22:03:52,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:52,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:52,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:03:53,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-18016071931329061406.jar 2024-11-21T22:03:53,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-679222382095172771.jar 2024-11-21T22:03:53,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:03:53,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:03:53,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:03:53,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:03:53,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:03:53,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:03:53,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:03:53,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:03:53,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:03:53,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:03:53,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:03:53,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:03:53,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:53,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:53,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:53,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:53,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:03:53,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:53,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:03:53,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742109_1285 (size=131440) 2024-11-21T22:03:53,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742109_1285 (size=131440) 2024-11-21T22:03:53,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742109_1285 (size=131440) 2024-11-21T22:03:53,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T22:03:53,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T22:03:53,958 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T22:03:53,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T22:03:53,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T22:03:53,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T22:03:53,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742112_1288 (size=903739) 2024-11-21T22:03:53,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742112_1288 (size=903739) 2024-11-21T22:03:53,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742112_1288 (size=903739) 2024-11-21T22:03:53,997 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:03:54,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T22:03:54,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T22:03:54,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T22:03:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742114_1290 (size=1877034) 2024-11-21T22:03:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742114_1290 (size=1877034) 2024-11-21T22:03:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742114_1290 (size=1877034) 2024-11-21T22:03:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742115_1291 (size=77835) 2024-11-21T22:03:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742115_1291 (size=77835) 2024-11-21T22:03:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742115_1291 (size=77835) 2024-11-21T22:03:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742116_1292 (size=30949) 2024-11-21T22:03:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742116_1292 (size=30949) 2024-11-21T22:03:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742116_1292 (size=30949) 2024-11-21T22:03:54,100 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742117_1293 (size=1597347) 2024-11-21T22:03:54,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742117_1293 (size=1597347) 2024-11-21T22:03:54,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742117_1293 (size=1597347) 2024-11-21T22:03:54,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742118_1294 (size=4695811) 2024-11-21T22:03:54,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742118_1294 (size=4695811) 2024-11-21T22:03:54,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742118_1294 (size=4695811) 2024-11-21T22:03:54,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742119_1295 (size=232957) 2024-11-21T22:03:54,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742119_1295 (size=232957) 2024-11-21T22:03:54,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742119_1295 (size=232957) 2024-11-21T22:03:54,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742120_1296 (size=127628) 2024-11-21T22:03:54,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742120_1296 (size=127628) 2024-11-21T22:03:54,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742120_1296 (size=127628) 2024-11-21T22:03:54,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742121_1297 (size=20406) 2024-11-21T22:03:54,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742121_1297 (size=20406) 2024-11-21T22:03:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742121_1297 (size=20406) 2024-11-21T22:03:54,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742122_1298 (size=5175431) 2024-11-21T22:03:54,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742122_1298 (size=5175431) 2024-11-21T22:03:54,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742122_1298 (size=5175431) 2024-11-21T22:03:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742123_1299 (size=217634) 2024-11-21T22:03:54,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742123_1299 (size=217634) 2024-11-21T22:03:54,190 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742123_1299 (size=217634) 2024-11-21T22:03:54,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742124_1300 (size=1832290) 2024-11-21T22:03:54,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742124_1300 (size=1832290) 2024-11-21T22:03:54,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742124_1300 (size=1832290) 2024-11-21T22:03:54,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742125_1301 (size=440656) 2024-11-21T22:03:54,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742125_1301 (size=440656) 2024-11-21T22:03:54,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742125_1301 (size=440656) 2024-11-21T22:03:54,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742126_1302 (size=322274) 2024-11-21T22:03:54,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742126_1302 (size=322274) 2024-11-21T22:03:54,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742126_1302 (size=322274) 2024-11-21T22:03:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742127_1303 (size=503880) 2024-11-21T22:03:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742127_1303 (size=503880) 2024-11-21T22:03:54,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742127_1303 (size=503880) 2024-11-21T22:03:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742128_1304 (size=29229) 2024-11-21T22:03:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742128_1304 (size=29229) 2024-11-21T22:03:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742128_1304 (size=29229) 2024-11-21T22:03:54,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742129_1305 (size=24096) 2024-11-21T22:03:54,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742129_1305 (size=24096) 2024-11-21T22:03:54,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742129_1305 (size=24096) 2024-11-21T22:03:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742130_1306 (size=111872) 2024-11-21T22:03:54,292 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742130_1306 (size=111872) 2024-11-21T22:03:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742130_1306 (size=111872) 2024-11-21T22:03:54,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742131_1307 (size=45609) 2024-11-21T22:03:54,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742131_1307 (size=45609) 2024-11-21T22:03:54,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742131_1307 (size=45609) 2024-11-21T22:03:54,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742132_1308 (size=6424742) 2024-11-21T22:03:54,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742132_1308 (size=6424742) 2024-11-21T22:03:54,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742132_1308 (size=6424742) 2024-11-21T22:03:54,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742133_1309 (size=136454) 2024-11-21T22:03:54,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742133_1309 (size=136454) 2024-11-21T22:03:54,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742133_1309 (size=136454) 2024-11-21T22:03:54,335 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
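The preceding entries cover the client-side setup of the export job: the snapshot manifest is copied to the target, TableMapReduceUtil resolves the dependency jars that must ship with the MapReduce job, and the job resources are uploaded (the "No job jar file set" warning appears because the job is launched from a tool rather than a packaged jar). A sketch of driving a comparable export with the ExportSnapshot tool, assuming its documented options (-snapshot, -copy-to, -mappers); the destination URI below is a placeholder, not a path from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the command-line form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot <name> -copy-to <hdfs-uri> -mappers 1
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://namenode:8020/hbase-export",  // placeholder target
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }

The MapReduce job then copies the referenced hfiles and finally verifies the exported snapshot, as the "Finalize the Snapshot Export" and "Export Completed" entries further down show.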
2024-11-21T22:03:54,338 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-21T22:03:54,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-21T22:03:54,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742134_1310 (size=378) 2024-11-21T22:03:54,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742134_1310 (size=378) 2024-11-21T22:03:54,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742134_1310 (size=378) 2024-11-21T22:03:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742135_1311 (size=15) 2024-11-21T22:03:54,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742135_1311 (size=15) 2024-11-21T22:03:54,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742135_1311 (size=15) 2024-11-21T22:03:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742136_1312 (size=303789) 2024-11-21T22:03:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742136_1312 (size=303789) 2024-11-21T22:03:54,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742136_1312 (size=303789) 2024-11-21T22:03:54,405 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:03:54,405 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:03:54,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:03:54,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-21T22:03:54,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:03:54,424 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-21T22:03:54,425 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-21T22:03:54,678 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0006_000001 (auth:SIMPLE) from 127.0.0.1:59396 2024-11-21T22:03:57,139 WARN [regionserver/2a884b411602:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-11-21T22:03:59,908 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0006_000001 (auth:SIMPLE) from 127.0.0.1:41182 2024-11-21T22:03:59,927 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:00,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742137_1313 (size=349439) 2024-11-21T22:04:00,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742137_1313 (size=349439) 2024-11-21T22:04:00,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742137_1313 (size=349439) 2024-11-21T22:04:02,133 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0006_000001 (auth:SIMPLE) from 127.0.0.1:41784 2024-11-21T22:04:03,002 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8d7c6b050c10dcfdb96a9bcda29bc013 changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:04:03,002 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 11e3bba62c6cbb3574419cc6bedd5cee changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:04:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742138_1314 (size=4945) 2024-11-21T22:04:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742138_1314 (size=4945) 2024-11-21T22:04:05,576 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742138_1314 (size=4945) 2024-11-21T22:04:05,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742139_1315 (size=4945) 2024-11-21T22:04:05,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742139_1315 (size=4945) 2024-11-21T22:04:05,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742139_1315 (size=4945) 2024-11-21T22:04:05,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742140_1316 (size=17474) 2024-11-21T22:04:05,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742140_1316 (size=17474) 2024-11-21T22:04:05,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742140_1316 (size=17474) 2024-11-21T22:04:05,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742141_1317 (size=482) 2024-11-21T22:04:05,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742141_1317 (size=482) 2024-11-21T22:04:05,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742141_1317 (size=482) 2024-11-21T22:04:05,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742142_1318 (size=17474) 2024-11-21T22:04:05,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742142_1318 (size=17474) 2024-11-21T22:04:05,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742142_1318 (size=17474) 2024-11-21T22:04:05,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742143_1319 (size=349439) 2024-11-21T22:04:05,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742143_1319 (size=349439) 2024-11-21T22:04:05,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742143_1319 (size=349439) 2024-11-21T22:04:05,777 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0006_000001 (auth:SIMPLE) from 127.0.0.1:41792 2024-11-21T22:04:05,783 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000002/launch_container.sh] 2024-11-21T22:04:05,783 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000002/container_tokens] 2024-11-21T22:04:05,783 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000002/sysfs] 2024-11-21T22:04:07,736 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:04:07,737 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:04:07,743 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:07,743 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:04:07,743 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:04:07,743 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:07,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-21T22:04:07,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-21T22:04:07,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:07,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-21T22:04:07,744 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226632571/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-21T22:04:07,750 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:07,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:07,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T22:04:07,754 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226647754"}]},"ts":"1732226647754"} 2024-11-21T22:04:07,756 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-21T22:04:07,756 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-21T22:04:07,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-21T22:04:07,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, UNASSIGN}] 2024-11-21T22:04:07,759 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, UNASSIGN 2024-11-21T22:04:07,759 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=e8c8a757ef6be4acb9d13d01717f47b1, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, UNASSIGN because future has completed 2024-11-21T22:04:07,761 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:07,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T22:04:07,914 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:04:07,915 DEBUG 
[RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:07,915 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing e8c8a757ef6be4acb9d13d01717f47b1, disabling compactions & flushes 2024-11-21T22:04:07,915 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:04:07,915 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:04:07,915 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. after waiting 0 ms 2024-11-21T22:04:07,915 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 2024-11-21T22:04:07,925 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-21T22:04:07,926 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:07,927 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1. 
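After the export is verified, the test tears the table down: the entries above and below show DisableTableProcedure (pid=119) scheduling CloseTableRegionsProcedure (pid=120), a TransitRegionStateProcedure UNASSIGN (pid=121) and a CloseRegionProcedure (pid=122), followed by a DeleteTableProcedure that archives the store files. A minimal cleanup sketch using the same public Admin API (names again mirror the log; this is an illustration, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table =
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Drives the disable chain seen in the log: DisableTableProcedure ->
          // CloseTableRegionsProcedure -> UNASSIGN -> CloseRegionProcedure.
          admin.disableTable(table);
          // Drives DeleteTableProcedure; store files are not deleted in place but
          // moved to the archive directory by HFileArchiver.
          admin.deleteTable(table);
          // Optionally drop the snapshot once it has been exported.
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
        }
      }
    }

Note that deleteTable leaves the data recoverable for a while: the ARCHIVING entries below show HFileArchiver relocating the region directories rather than removing them outright.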
2024-11-21T22:04:07,927 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for e8c8a757ef6be4acb9d13d01717f47b1: Waiting for close lock at 1732226647915Running coprocessor pre-close hooks at 1732226647915Disabling compacts and flushes for region at 1732226647915Disabling writes for close at 1732226647915Writing region close event to WAL at 1732226647917 (+2 ms)Running coprocessor post-close hooks at 1732226647926 (+9 ms)Closed at 1732226647927 (+1 ms) 2024-11-21T22:04:07,930 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:04:07,930 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=e8c8a757ef6be4acb9d13d01717f47b1, regionState=CLOSED 2024-11-21T22:04:07,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:07,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-21T22:04:07,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure e8c8a757ef6be4acb9d13d01717f47b1, server=2a884b411602,44629,1732226515126 in 172 msec 2024-11-21T22:04:07,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-21T22:04:07,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e8c8a757ef6be4acb9d13d01717f47b1, UNASSIGN in 176 msec 2024-11-21T22:04:07,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-11-21T22:04:07,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 180 msec 2024-11-21T22:04:07,939 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226647939"}]},"ts":"1732226647939"} 2024-11-21T22:04:07,941 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-21T22:04:07,941 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-21T22:04:07,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 190 msec 2024-11-21T22:04:08,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T22:04:08,072 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T22:04:08,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,077 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,081 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,082 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:04:08,082 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:04:08,084 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/recovered.edits] 2024-11-21T22:04:08,084 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/recovered.edits] 2024-11-21T22:04:08,088 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:04:08,089 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/cd1b699ca4784e6ca0bd670955d311b5.c195f33e03a4585450be8f5d5733a1fe to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/cd1b699ca4784e6ca0bd670955d311b5.c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:04:08,089 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/recovered.edits] 2024-11-21T22:04:08,089 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/cf/cd1b699ca4784e6ca0bd670955d311b5 2024-11-21T22:04:08,092 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/fec05b66213340f19530017c9cf0c9be.903fe2287b2a9b8f8e279126f6f21af5 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/cf/fec05b66213340f19530017c9cf0c9be.903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:04:08,093 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/recovered.edits/8.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe/recovered.edits/8.seqid 2024-11-21T22:04:08,093 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c195f33e03a4585450be8f5d5733a1fe 2024-11-21T22:04:08,094 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/cf/fec05b66213340f19530017c9cf0c9be 2024-11-21T22:04:08,094 DEBUG [HFileArchiver-11 
{}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/recovered.edits/12.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1/recovered.edits/12.seqid 2024-11-21T22:04:08,094 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e8c8a757ef6be4acb9d13d01717f47b1 2024-11-21T22:04:08,096 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/recovered.edits/8.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5/recovered.edits/8.seqid 2024-11-21T22:04:08,097 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/903fe2287b2a9b8f8e279126f6f21af5 2024-11-21T22:04:08,097 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-21T22:04:08,099 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,102 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-21T22:04:08,104 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-21T22:04:08,106 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,106 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-11-21T22:04:08,106 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226648106"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:08,108 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-21T22:04:08,108 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e8c8a757ef6be4acb9d13d01717f47b1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T22:04:08,108 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-11-21T22:04:08,108 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226648108"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:08,110 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-21T22:04:08,115 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 43 msec 2024-11-21T22:04:08,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,125 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T22:04:08,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T22:04:08,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
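With pid=123 finished, the table's region directories have been archived, its rows removed from hbase:meta, and its ACL znode cleanup is under way. A hedged sketch of the client call that triggers a DeleteTableProcedure like this one, written as an extension of the try-with-resources block in the earlier disable sketch; the tableExists check at the end is illustrative, not something the log shows the test doing:

TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
if (admin.isTableEnabled(tn)) {
  admin.disableTable(tn);               // deleteTable requires a disabled table
}
admin.deleteTable(tn);                  // master runs a DeleteTableProcedure (pid=123 above)
boolean gone = !admin.tableExists(tn);  // expected true once the procedure reports SUCCESS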
2024-11-21T22:04:08,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,133 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,133 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-21T22:04:08,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:08,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 
\x03 \x04 2024-11-21T22:04:08,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:08,135 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,135 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T22:04:08,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T22:04:08,139 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226648138"}]},"ts":"1732226648138"} 2024-11-21T22:04:08,141 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-21T22:04:08,141 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-21T22:04:08,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-21T22:04:08,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, UNASSIGN}] 2024-11-21T22:04:08,144 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, UNASSIGN 2024-11-21T22:04:08,144 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, UNASSIGN 2024-11-21T22:04:08,145 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=8d7c6b050c10dcfdb96a9bcda29bc013, 
regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:08,145 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=11e3bba62c6cbb3574419cc6bedd5cee, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:08,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, UNASSIGN because future has completed 2024-11-21T22:04:08,147 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:08,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:04:08,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, UNASSIGN because future has completed 2024-11-21T22:04:08,148 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:08,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T22:04:08,299 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 8d7c6b050c10dcfdb96a9bcda29bc013, disabling compactions & flushes 2024-11-21T22:04:08,300 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
after waiting 0 ms 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 2024-11-21T22:04:08,300 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:08,300 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 11e3bba62c6cbb3574419cc6bedd5cee, disabling compactions & flushes 2024-11-21T22:04:08,301 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:04:08,301 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:04:08,301 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. after waiting 0 ms 2024-11-21T22:04:08,301 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:04:08,305 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:08,305 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:08,305 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013. 
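The repeated "MasterRpcServices(1377): Checking to see if procedure is done pid=124" lines correspond to the client-side wait: the admin client keeps asking the master whether its disable procedure has finished. A sketch of the asynchronous form of the same admin call, again inside the connection/admin block from the first sketch; the timeout value is an arbitrary illustration:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

Future<Void> disabled =
    admin.disableTableAsync(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
// The client polls the master until the procedure completes
// (the "is procedure done pid=124" lines above), then the future resolves.
disabled.get(5, TimeUnit.MINUTES);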
2024-11-21T22:04:08,305 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 8d7c6b050c10dcfdb96a9bcda29bc013: Waiting for close lock at 1732226648300Running coprocessor pre-close hooks at 1732226648300Disabling compacts and flushes for region at 1732226648300Disabling writes for close at 1732226648300Writing region close event to WAL at 1732226648301 (+1 ms)Running coprocessor post-close hooks at 1732226648305 (+4 ms)Closed at 1732226648305 2024-11-21T22:04:08,306 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:08,306 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:08,307 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee. 2024-11-21T22:04:08,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 11e3bba62c6cbb3574419cc6bedd5cee: Waiting for close lock at 1732226648300Running coprocessor pre-close hooks at 1732226648300Disabling compacts and flushes for region at 1732226648300Disabling writes for close at 1732226648301 (+1 ms)Writing region close event to WAL at 1732226648302 (+1 ms)Running coprocessor post-close hooks at 1732226648306 (+4 ms)Closed at 1732226648307 (+1 ms) 2024-11-21T22:04:08,309 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:04:08,309 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=8d7c6b050c10dcfdb96a9bcda29bc013, regionState=CLOSED 2024-11-21T22:04:08,313 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:04:08,313 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=11e3bba62c6cbb3574419cc6bedd5cee, regionState=CLOSED 2024-11-21T22:04:08,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:04:08,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:08,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-21T22:04:08,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 8d7c6b050c10dcfdb96a9bcda29bc013, server=2a884b411602,41399,1732226514979 in 168 msec 2024-11-21T22:04:08,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=8d7c6b050c10dcfdb96a9bcda29bc013, UNASSIGN in 175 msec 2024-11-21T22:04:08,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-21T22:04:08,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 11e3bba62c6cbb3574419cc6bedd5cee, server=2a884b411602,44629,1732226515126 in 169 msec 2024-11-21T22:04:08,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-21T22:04:08,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=11e3bba62c6cbb3574419cc6bedd5cee, UNASSIGN in 176 msec 2024-11-21T22:04:08,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-21T22:04:08,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 179 msec 2024-11-21T22:04:08,324 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226648323"}]},"ts":"1732226648323"} 2024-11-21T22:04:08,326 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-21T22:04:08,326 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-21T22:04:08,329 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 192 msec 2024-11-21T22:04:08,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T22:04:08,451 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T22:04:08,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,454 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,456 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,458 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:04:08,458 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:04:08,460 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/recovered.edits] 2024-11-21T22:04:08,460 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/recovered.edits] 2024-11-21T22:04:08,463 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/cf/f13e2c11cb3c4b838027b9c2947187f8 2024-11-21T22:04:08,463 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/cf/3a5e0b7732e74c0bb8d83741ae34b65f 2024-11-21T22:04:08,465 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013/recovered.edits/9.seqid 2024-11-21T22:04:08,465 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee/recovered.edits/9.seqid 2024-11-21T22:04:08,466 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/8d7c6b050c10dcfdb96a9bcda29bc013 2024-11-21T22:04:08,466 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithMergeRegion/11e3bba62c6cbb3574419cc6bedd5cee 2024-11-21T22:04:08,466 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-21T22:04:08,468 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,470 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-21T22:04:08,473 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-21T22:04:08,474 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,474 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
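As the HFileArchiver lines show, deleting the table does not destroy its HFiles: each region directory is moved under the cluster's archive root, keeping the data/default/<table>/<region>/<family> layout. A hedged sketch of listing what ended up in the archive with the plain Hadoop FileSystem API; the NameNode address and archive path are copied from the log paths above, the class name and the rest are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedTableSketch {                  // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40237"); // NameNode taken from the log paths
    Path archivedTable = new Path("/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/"
        + "archive/data/default/testtb-testExportFileSystemStateWithMergeRegion");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus region : fs.listStatus(archivedTable)) {        // one directory per archived region
        for (FileStatus family : fs.listStatus(region.getPath())) {   // cf/ and recovered.edits/
          for (FileStatus file : fs.listStatus(family.getPath())) {
            System.out.println(file.getPath());                       // archived HFiles and seqid files
          }
        }
      }
    }
  }
}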
2024-11-21T22:04:08,474 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226648474"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:08,474 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226648474"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:08,477 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:04:08,477 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8d7c6b050c10dcfdb96a9bcda29bc013, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732226629026.8d7c6b050c10dcfdb96a9bcda29bc013.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 11e3bba62c6cbb3574419cc6bedd5cee, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732226629026.11e3bba62c6cbb3574419cc6bedd5cee.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:04:08,477 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-21T22:04:08,477 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226648477"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:08,479 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-21T22:04:08,480 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 28 msec 2024-11-21T22:04:08,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,491 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,492 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T22:04:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T22:04:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T22:04:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T22:04:08,500 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,500 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-21T22:04:08,501 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,501 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T22:04:08,509 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-21T22:04:08,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-21T22:04:08,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:08,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-21T22:04:08,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:08,539 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=803 (was 791) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:42225 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/2a884b411602:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 126384) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:56876 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:35480 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:39389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4511 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:51270 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1040155669_1 at /127.0.0.1:35440 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=818 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 547), ProcessCount=16 (was 16), AvailableMemoryMB=847 (was 794) - AvailableMemoryMB LEAK? - 2024-11-21T22:04:08,539 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-21T22:04:08,555 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=803, OpenFileDescriptor=818, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=16, AvailableMemoryMB=846 2024-11-21T22:04:08,555 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-11-21T22:04:08,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:04:08,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:08,559 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:04:08,559 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:08,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: 
"testtb-testExportExpiredSnapshot" procId is: 131 2024-11-21T22:04:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T22:04:08,560 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:04:08,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742144_1320 (size=407) 2024-11-21T22:04:08,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742144_1320 (size=407) 2024-11-21T22:04:08,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742144_1320 (size=407) 2024-11-21T22:04:08,568 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c6db3138bf06c444c5026f02ba852b6c, NAME => 'testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:08,568 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 43f6c24386ba68b0a0ec27ead216b41b, NAME => 'testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742145_1321 (size=68) 2024-11-21T22:04:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742145_1321 (size=68) 2024-11-21T22:04:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742145_1321 (size=68) 2024-11-21T22:04:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742146_1322 (size=68) 2024-11-21T22:04:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36581 is added to blk_1073742146_1322 (size=68) 2024-11-21T22:04:08,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742146_1322 (size=68) 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing c6db3138bf06c444c5026f02ba852b6c, disabling compactions & flushes 2024-11-21T22:04:08,575 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. after waiting 0 ms 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,575 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for c6db3138bf06c444c5026f02ba852b6c: Waiting for close lock at 1732226648575Disabling compacts and flushes for region at 1732226648575Disabling writes for close at 1732226648575Writing region close event to WAL at 1732226648575Closed at 1732226648575 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:08,575 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 43f6c24386ba68b0a0ec27ead216b41b, disabling compactions & flushes 2024-11-21T22:04:08,575 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 
after waiting 0 ms 2024-11-21T22:04:08,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,576 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,576 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 43f6c24386ba68b0a0ec27ead216b41b: Waiting for close lock at 1732226648575Disabling compacts and flushes for region at 1732226648575Disabling writes for close at 1732226648576 (+1 ms)Writing region close event to WAL at 1732226648576Closed at 1732226648576 2024-11-21T22:04:08,577 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:04:08,577 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732226648577"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226648577"}]},"ts":"1732226648577"} 2024-11-21T22:04:08,577 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732226648577"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226648577"}]},"ts":"1732226648577"} 2024-11-21T22:04:08,579 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
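The records above show CreateTableProcedure pid=131 writing the filesystem layout for 'testtb-testExportExpiredSnapshot' and adding its two regions to hbase:meta, with a single column family 'cf' (VERSIONS => '1', otherwise default settings) and a region boundary at '1'. As a rough client-side sketch of an equivalent create request (a plain Java client against a standard Connection, not the test harness's own utility code; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single 'cf' family with max one version, mirroring the descriptor in the log.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // Pre-split at '1' so two regions are created, matching the STARTKEY/ENDKEY
      // pairs ('' to '1' and '1' to '') reported by RegionOpenAndInit above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The blocking createTable call returns once the create procedure completes; the repeated "Checking to see if procedure is done pid=131" records in this section are the client polling the master for exactly that completion.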
2024-11-21T22:04:08,580 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:04:08,580 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226648580"}]},"ts":"1732226648580"} 2024-11-21T22:04:08,582 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-21T22:04:08,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:04:08,584 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:04:08,584 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:04:08,584 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:04:08,584 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:04:08,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, ASSIGN}] 2024-11-21T22:04:08,585 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, ASSIGN 2024-11-21T22:04:08,585 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, ASSIGN 2024-11-21T22:04:08,586 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:04:08,586 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:04:08,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T22:04:08,737 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T22:04:08,737 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=43f6c24386ba68b0a0ec27ead216b41b, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:08,737 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=c6db3138bf06c444c5026f02ba852b6c, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:08,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, ASSIGN because future has completed 2024-11-21T22:04:08,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:08,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, ASSIGN because future has completed 2024-11-21T22:04:08,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:04:08,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T22:04:08,898 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,898 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 43f6c24386ba68b0a0ec27ead216b41b, NAME => 'testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:04:08,899 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 
service=AccessControlService 2024-11-21T22:04:08,899 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:04:08,899 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,899 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:08,899 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,899 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,900 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => c6db3138bf06c444c5026f02ba852b6c, NAME => 'testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. service=AccessControlService 2024-11-21T22:04:08,900 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
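Both region opens above register the AccessControlService coprocessor endpoint and load org.apache.hadoop.hbase.security.access.AccessController, which is what later allows table ACLs to be stored and enforced. A minimal sketch of the standard configuration keys such a secure cluster carries (programmatic form for illustration only; the log does not show how the test harness actually wires this up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and install AccessController on master, regions and
    // region servers; these are the standard hbase-site.xml property names.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    System.out.println("AccessController enabled: "
        + conf.get("hbase.coprocessor.region.classes"));
  }
}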
2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,900 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,901 INFO [StoreOpener-43f6c24386ba68b0a0ec27ead216b41b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,902 INFO [StoreOpener-c6db3138bf06c444c5026f02ba852b6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,902 INFO [StoreOpener-43f6c24386ba68b0a0ec27ead216b41b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 43f6c24386ba68b0a0ec27ead216b41b columnFamilyName cf 2024-11-21T22:04:08,902 DEBUG [StoreOpener-43f6c24386ba68b0a0ec27ead216b41b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:08,902 INFO [StoreOpener-43f6c24386ba68b0a0ec27ead216b41b-1 {}] regionserver.HStore(327): Store=43f6c24386ba68b0a0ec27ead216b41b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:08,902 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,903 INFO [StoreOpener-c6db3138bf06c444c5026f02ba852b6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6db3138bf06c444c5026f02ba852b6c columnFamilyName cf 2024-11-21T22:04:08,903 DEBUG [StoreOpener-c6db3138bf06c444c5026f02ba852b6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:08,903 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,903 INFO [StoreOpener-c6db3138bf06c444c5026f02ba852b6c-1 {}] regionserver.HStore(327): Store=c6db3138bf06c444c5026f02ba852b6c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:08,903 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,903 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,904 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,905 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 
{event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,906 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,907 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:08,907 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 43f6c24386ba68b0a0ec27ead216b41b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60021486, jitterRate=-0.10561016201972961}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:08,907 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:08,908 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:08,908 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened c6db3138bf06c444c5026f02ba852b6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61318351, jitterRate=-0.08628536760807037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:08,908 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:08,908 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for c6db3138bf06c444c5026f02ba852b6c: Running coprocessor pre-open hook at 1732226648901Writing region info on filesystem at 1732226648901Initializing all the Stores at 1732226648901Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226648901Cleaning up temporary data from old regions at 1732226648904 (+3 ms)Running coprocessor post-open hooks at 1732226648908 (+4 ms)Region opened successfully at 1732226648908 2024-11-21T22:04:08,908 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 43f6c24386ba68b0a0ec27ead216b41b: Running coprocessor pre-open hook at 1732226648899Writing region info on filesystem at 1732226648899Initializing all the 
Stores at 1732226648900 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226648900Cleaning up temporary data from old regions at 1732226648904 (+4 ms)Running coprocessor post-open hooks at 1732226648907 (+3 ms)Region opened successfully at 1732226648908 (+1 ms) 2024-11-21T22:04:08,909 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c., pid=135, masterSystemTime=1732226648896 2024-11-21T22:04:08,909 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b., pid=134, masterSystemTime=1732226648895 2024-11-21T22:04:08,910 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,911 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:08,911 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=c6db3138bf06c444c5026f02ba852b6c, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:08,911 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:08,911 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 
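With both regions reporting "Opened" and their post-open deploy tasks finished, the resulting assignment can be checked from a client through a RegionLocator. A small sketch under the same assumptions as the create-table example (plain client Connection; class name illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionAssignment {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Expect two locations here, e.g. c6db3138... and 43f6c243... on their
      // respective region servers, as logged by RegionStateStore above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}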
2024-11-21T22:04:08,912 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=43f6c24386ba68b0a0ec27ead216b41b, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:08,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:04:08,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:08,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=132 2024-11-21T22:04:08,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183 in 170 msec 2024-11-21T22:04:08,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=133 2024-11-21T22:04:08,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126 in 173 msec 2024-11-21T22:04:08,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, ASSIGN in 332 msec 2024-11-21T22:04:08,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=131 2024-11-21T22:04:08,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, ASSIGN in 333 msec 2024-11-21T22:04:08,924 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:04:08,924 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226648924"}]},"ts":"1732226648924"} 2024-11-21T22:04:08,926 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-21T22:04:08,927 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:04:08,927 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-21T22:04:08,932 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-21T22:04:08,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:08,993 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:09,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:09,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:09,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:09,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:09,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 444 msec 2024-11-21T22:04:09,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T22:04:09,192 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T22:04:09,192 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-21T22:04:09,193 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:09,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
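The PermissionStorage and ZKPermissionWatcher records above show the owner grant "jenkins: RWXCA" being written for the new table and propagated to every region server via the /hbase/acl znode. Issued explicitly from a client, an equivalent grant would look roughly like this (AccessControlClient is the public helper; RWXCA expands to READ, WRITE, EXEC, CREATE, ADMIN; class name illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermsSketch {
  // AccessControlClient.grant is declared to throw Throwable.
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // null family/qualifier means the grant applies to the whole table.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}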
2024-11-21T22:04:09,200 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:09,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-21T22:04:09,200 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:09,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T22:04:09,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226649203 (current time:1732226649203). 2024-11-21T22:04:09,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:09,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T22:04:09,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:09,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ded3921, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:09,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:09,205 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78dcb941, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:09,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,207 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55312, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:09,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b50480d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:09,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:09,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,211 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:09,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:04:09,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:09,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,212 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:09,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22bbca6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:09,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:09,214 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:09,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:09,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:09,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ec91ee0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:09,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:09,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,216 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:09,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a54abd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:09,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:09,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,218 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:09,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:09,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:09,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 
2024-11-21T22:04:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,222 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T22:04:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
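
Editor's note: the stretch above shows the master validating a FLUSH-type snapshot request for emptySnaptb0-testExportExpiredSnapshot, recording the table ACL into the snapshot description, and concluding "No existing snapshot, attempting snapshot...". From the client's point of view that whole exchange is a single blocking Admin call; the sketch below illustrates it under stated assumptions (names copied from the log, connection setup and class name invented for the example) and is not the test's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure finishes;
      // SnapshotType.FLUSH matches the "type=FLUSH" in the request logged above.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}
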
2024-11-21T22:04:09,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T22:04:09,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-21T22:04:09,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T22:04:09,224 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:09,225 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:09,227 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:09,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742147_1323 (size=170) 2024-11-21T22:04:09,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742147_1323 (size=170) 2024-11-21T22:04:09,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742147_1323 (size=170) 2024-11-21T22:04:09,234 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:09,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b}] 2024-11-21T22:04:09,235 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,235 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,331 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T22:04:09,377 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-21T22:04:09,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-21T22:04:09,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-21T22:04:09,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:09,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 43f6c24386ba68b0a0ec27ead216b41b: 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for c6db3138bf06c444c5026f02ba852b6c: 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:09,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:09,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:09,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742148_1324 (size=71) 2024-11-21T22:04:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742148_1324 (size=71) 2024-11-21T22:04:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742148_1324 (size=71) 2024-11-21T22:04:09,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:09,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-21T22:04:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-21T22:04:09,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,402 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742149_1325 (size=71) 2024-11-21T22:04:09,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742149_1325 (size=71) 2024-11-21T22:04:09,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742149_1325 (size=71) 2024-11-21T22:04:09,405 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b in 169 msec 2024-11-21T22:04:09,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 
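
Editor's note: the repeated "Checking to see if procedure is done pid=136" lines are the admin client polling the master until the snapshot procedure completes. A non-blocking variant of the same flow is sketched below as a hedged illustration only; the snapshot name, table, and type are copied from the log, the 60-second timeout and class name are arbitrary, and the SnapshotDescription constructor used here is an assumption about the client API rather than something shown in the log.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submit the snapshot request and let the admin client poll the master
      // ("Checking to see if procedure is done ...") behind the returned Future.
      Future<Void> done = admin.snapshotAsync(new SnapshotDescription(
          "emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
      done.get(60, TimeUnit.SECONDS); // arbitrary timeout for this sketch
    }
  }
}
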
2024-11-21T22:04:09,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-21T22:04:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-21T22:04:09,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,405 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=136 2024-11-21T22:04:09,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c in 172 msec 2024-11-21T22:04:09,408 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:09,408 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:09,409 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:09,409 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,409 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742150_1326 (size=552) 2024-11-21T22:04:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742150_1326 (size=552) 2024-11-21T22:04:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742150_1326 (size=552) 2024-11-21T22:04:09,420 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:09,424 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:09,424 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:09,425 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-21T22:04:09,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 202 msec 2024-11-21T22:04:09,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T22:04:09,542 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T22:04:09,549 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='029c73820b83fcfd276bc76ca509b7b3f', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:09,550 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1236817a2c80ca7a736c8086cdb971ec2', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:04:09,552 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='280a25183ecc4abf020a2d8050d4e1bf7', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:04:09,553 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3e5574040037822ca0c9da8865019ecd3', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b., 
hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:04:09,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:09,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:09,561 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:09,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-21T22:04:09,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:09,563 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:09,564 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:09,568 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:09,573 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:09,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T22:04:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226649575 (current time:1732226649575). 
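
Editor's note: the two HRegion(8528) messages above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are logged when a mutation arrives with write-ahead logging turned off. A minimal sketch of such a write follows; the column family 'cf' and qualifier 'q' appear elsewhere in this log, while the row key, value, and class name are made up for the example.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));   // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);         // skips the write-ahead log for this mutation
      table.put(put);
    }
  }
}

Skipping the WAL trades crash safety for write throughput, which is why the region server logs the data-loss warning seen above.
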
2024-11-21T22:04:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T22:04:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93fe29b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:09,577 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:09,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:09,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:09,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9672281, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:09,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:09,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,579 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:09,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f130ac0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:09,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:09,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60660, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:09,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,582 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f47ae5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:09,584 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aec7129, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,585 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55372, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:09,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e6a27bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:09,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:09,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:09,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,587 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:04:09,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:09,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:09,589 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:09,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:09,590 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:09,601 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:09,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T22:04:09,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:09,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T22:04:09,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-21T22:04:09,604 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:09,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T22:04:09,605 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:09,607 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742151_1327 (size=165) 2024-11-21T22:04:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742151_1327 (size=165) 2024-11-21T22:04:09,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742151_1327 (size=165) 2024-11-21T22:04:09,614 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:09,614 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b}] 2024-11-21T22:04:09,614 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T22:04:09,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-21T22:04:09,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-21T22:04:09,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:09,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 
2024-11-21T22:04:09,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing c6db3138bf06c444c5026f02ba852b6c 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-21T22:04:09,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 43f6c24386ba68b0a0ec27ead216b41b 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-21T22:04:09,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/.tmp/cf/9a67dcff41fe48e888c660d99a89ecd9 is 71, key is 0c51bf24a6921889fddd24878d804bd4/cf:q/1732226649557/Put/seqid=0 2024-11-21T22:04:09,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/.tmp/cf/ed7693ec69fc4a3d9535db5804a8ece3 is 71, key is 11cd3fe09927cfd58d3a737d41e6cd53/cf:q/1732226649559/Put/seqid=0 2024-11-21T22:04:09,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742153_1329 (size=8324) 2024-11-21T22:04:09,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742153_1329 (size=8324) 2024-11-21T22:04:09,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742153_1329 (size=8324) 2024-11-21T22:04:09,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/.tmp/cf/ed7693ec69fc4a3d9535db5804a8ece3 2024-11-21T22:04:09,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/.tmp/cf/ed7693ec69fc4a3d9535db5804a8ece3 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3 2024-11-21T22:04:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742152_1328 (size=5288) 2024-11-21T22:04:09,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742152_1328 (size=5288) 2024-11-21T22:04:09,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742152_1328 (size=5288) 2024-11-21T22:04:09,796 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/.tmp/cf/9a67dcff41fe48e888c660d99a89ecd9 2024-11-21T22:04:09,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3, entries=47, sequenceid=6, filesize=8.1 K 2024-11-21T22:04:09,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 43f6c24386ba68b0a0ec27ead216b41b in 33ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:09,799 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 43f6c24386ba68b0a0ec27ead216b41b: 2024-11-21T22:04:09,799 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. for snaptb0-testExportExpiredSnapshot completed. 2024-11-21T22:04:09,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:09,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3] hfiles 2024-11-21T22:04:09,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/.tmp/cf/9a67dcff41fe48e888c660d99a89ecd9 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9 2024-11-21T22:04:09,805 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9, entries=3, sequenceid=6, filesize=5.2 K 2024-11-21T22:04:09,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for c6db3138bf06c444c5026f02ba852b6c in 40ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for c6db3138bf06c444c5026f02ba852b6c: 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. for snaptb0-testExportExpiredSnapshot completed. 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9] hfiles 2024-11-21T22:04:09,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742154_1330 (size=110) 2024-11-21T22:04:09,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742154_1330 (size=110) 2024-11-21T22:04:09,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742154_1330 (size=110) 2024-11-21T22:04:09,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 
2024-11-21T22:04:09,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-21T22:04:09,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-21T22:04:09,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,809 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:09,812 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b in 196 msec 2024-11-21T22:04:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742155_1331 (size=110) 2024-11-21T22:04:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742155_1331 (size=110) 2024-11-21T22:04:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742155_1331 (size=110) 2024-11-21T22:04:09,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 
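
The per-region flush and manifest-reference records above are the server side of a single client snapshot request against testtb-testExportExpiredSnapshot. As a rough, hedged sketch only (the class name and connection setup are illustrative and not taken from this test), such a flush snapshot could be requested through the standard HBase Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush-type snapshot of the source table named in the log records above.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }
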
2024-11-21T22:04:09,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-21T22:04:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-21T22:04:09,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,817 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:09,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=140, resume processing ppid=139 2024-11-21T22:04:09,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6db3138bf06c444c5026f02ba852b6c in 204 msec 2024-11-21T22:04:09,820 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:09,820 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:09,821 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:09,821 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,822 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742156_1332 (size=630) 2024-11-21T22:04:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742156_1332 (size=630) 2024-11-21T22:04:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742156_1332 (size=630) 2024-11-21T22:04:09,834 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:09,839 INFO 
[PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:09,839 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:09,840 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:09,840 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-21T22:04:09,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 238 msec 2024-11-21T22:04:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T22:04:09,922 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T22:04:09,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:04:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-21T22:04:09,929 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:04:09,929 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:09,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-21T22:04:09,931 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T22:04:09,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:04:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742157_1333 (size=400) 2024-11-21T22:04:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742157_1333 (size=400) 2024-11-21T22:04:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742157_1333 (size=400) 2024-11-21T22:04:09,945 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f7a1444316755d092bee2d182dde7a0c, NAME => 'testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:09,946 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7c3e82354b20e9e6eb82600246f4d616, NAME => 'testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:09,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742159_1335 (size=61) 2024-11-21T22:04:09,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742159_1335 (size=61) 2024-11-21T22:04:09,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742159_1335 (size=61) 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] 
regionserver.HRegion(1722): Closing 7c3e82354b20e9e6eb82600246f4d616, disabling compactions & flushes 2024-11-21T22:04:09,957 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. after waiting 0 ms 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:09,957 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:09,957 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7c3e82354b20e9e6eb82600246f4d616: Waiting for close lock at 1732226649957Disabling compacts and flushes for region at 1732226649957Disabling writes for close at 1732226649957Writing region close event to WAL at 1732226649957Closed at 1732226649957 2024-11-21T22:04:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742158_1334 (size=61) 2024-11-21T22:04:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742158_1334 (size=61) 2024-11-21T22:04:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742158_1334 (size=61) 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing f7a1444316755d092bee2d182dde7a0c, disabling compactions & flushes 2024-11-21T22:04:09,963 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. after waiting 0 ms 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
2024-11-21T22:04:09,963 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:04:09,963 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for f7a1444316755d092bee2d182dde7a0c: Waiting for close lock at 1732226649963Disabling compacts and flushes for region at 1732226649963Disabling writes for close at 1732226649963Writing region close event to WAL at 1732226649963Closed at 1732226649963 2024-11-21T22:04:09,964 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:04:09,964 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732226649964"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226649964"}]},"ts":"1732226649964"} 2024-11-21T22:04:09,964 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732226649964"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226649964"}]},"ts":"1732226649964"} 2024-11-21T22:04:09,967 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:04:09,967 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:04:09,968 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226649967"}]},"ts":"1732226649967"} 2024-11-21T22:04:09,969 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-21T22:04:09,969 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:04:09,970 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:04:09,970 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:04:09,970 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:04:09,970 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:04:09,971 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f7a1444316755d092bee2d182dde7a0c, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=7c3e82354b20e9e6eb82600246f4d616, ASSIGN}] 2024-11-21T22:04:09,972 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=7c3e82354b20e9e6eb82600246f4d616, ASSIGN 2024-11-21T22:04:09,972 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f7a1444316755d092bee2d182dde7a0c, ASSIGN 2024-11-21T22:04:09,972 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f7a1444316755d092bee2d182dde7a0c, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:04:09,972 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=7c3e82354b20e9e6eb82600246f4d616, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:04:10,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T22:04:10,123 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-21T22:04:10,123 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=f7a1444316755d092bee2d182dde7a0c, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:10,123 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=7c3e82354b20e9e6eb82600246f4d616, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:10,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f7a1444316755d092bee2d182dde7a0c, ASSIGN because future has completed 2024-11-21T22:04:10,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7a1444316755d092bee2d182dde7a0c, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:10,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=7c3e82354b20e9e6eb82600246f4d616, ASSIGN because future has completed 2024-11-21T22:04:10,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c3e82354b20e9e6eb82600246f4d616, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:04:10,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T22:04:10,279 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => f7a1444316755d092bee2d182dde7a0c, NAME => 'testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. service=AccessControlService 2024-11-21T22:04:10,280 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c3e82354b20e9e6eb82600246f4d616, NAME => 'testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:04:10,280 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. service=AccessControlService 2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,280 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:10,280 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,281 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,282 INFO [StoreOpener-f7a1444316755d092bee2d182dde7a0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,282 INFO [StoreOpener-7c3e82354b20e9e6eb82600246f4d616-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,283 INFO [StoreOpener-7c3e82354b20e9e6eb82600246f4d616-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c3e82354b20e9e6eb82600246f4d616 columnFamilyName cf 2024-11-21T22:04:10,283 INFO [StoreOpener-f7a1444316755d092bee2d182dde7a0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7a1444316755d092bee2d182dde7a0c columnFamilyName cf 2024-11-21T22:04:10,283 DEBUG [StoreOpener-7c3e82354b20e9e6eb82600246f4d616-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:10,283 DEBUG [StoreOpener-f7a1444316755d092bee2d182dde7a0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:10,283 INFO [StoreOpener-f7a1444316755d092bee2d182dde7a0c-1 {}] regionserver.HStore(327): Store=f7a1444316755d092bee2d182dde7a0c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:10,283 INFO [StoreOpener-7c3e82354b20e9e6eb82600246f4d616-1 {}] regionserver.HStore(327): Store=7c3e82354b20e9e6eb82600246f4d616/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:10,284 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,284 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,284 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,284 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,284 DEBUG 
[RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,284 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,285 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,285 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,285 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,285 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,286 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,286 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,288 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:10,299 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened f7a1444316755d092bee2d182dde7a0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74279448, jitterRate=0.1068500280380249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:10,299 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,300 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for f7a1444316755d092bee2d182dde7a0c: Running coprocessor pre-open hook at 1732226650281Writing region info on filesystem at 1732226650281Initializing all the Stores at 1732226650282 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'} at 1732226650282Cleaning up temporary data from old regions at 1732226650285 (+3 ms)Running coprocessor post-open hooks at 1732226650299 (+14 ms)Region opened successfully at 1732226650300 (+1 ms) 2024-11-21T22:04:10,302 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c., pid=145, masterSystemTime=1732226650276 2024-11-21T22:04:10,302 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:10,303 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 7c3e82354b20e9e6eb82600246f4d616; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75495675, jitterRate=0.12497322261333466}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:10,303 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,303 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 7c3e82354b20e9e6eb82600246f4d616: Running coprocessor pre-open hook at 1732226650281Writing region info on filesystem at 1732226650281Initializing all the Stores at 1732226650282 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226650282Cleaning up temporary data from old regions at 1732226650285 (+3 ms)Running coprocessor post-open hooks at 1732226650303 (+18 ms)Region opened successfully at 1732226650303 2024-11-21T22:04:10,304 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:04:10,304 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
2024-11-21T22:04:10,304 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=f7a1444316755d092bee2d182dde7a0c, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:10,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7a1444316755d092bee2d182dde7a0c, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:10,307 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616., pid=146, masterSystemTime=1732226650277 2024-11-21T22:04:10,312 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:10,313 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:10,313 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=7c3e82354b20e9e6eb82600246f4d616, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:10,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-21T22:04:10,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure f7a1444316755d092bee2d182dde7a0c, server=2a884b411602,44629,1732226515126 in 182 msec 2024-11-21T22:04:10,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c3e82354b20e9e6eb82600246f4d616, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:04:10,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f7a1444316755d092bee2d182dde7a0c, ASSIGN in 343 msec 2024-11-21T22:04:10,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-21T22:04:10,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c3e82354b20e9e6eb82600246f4d616, server=2a884b411602,36419,1732226515183 in 189 msec 2024-11-21T22:04:10,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=142 2024-11-21T22:04:10,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=7c3e82354b20e9e6eb82600246f4d616, ASSIGN in 346 msec 2024-11-21T22:04:10,319 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 
2024-11-21T22:04:10,319 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226650319"}]},"ts":"1732226650319"} 2024-11-21T22:04:10,321 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-21T22:04:10,321 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:04:10,322 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-21T22:04:10,325 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T22:04:10,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:10,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:10,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:10,496 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,546 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:10,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 620 msec 2024-11-21T22:04:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T22:04:10,561 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-21T22:04:10,561 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-21T22:04:10,561 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:10,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-21T22:04:10,565 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:10,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 
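
The CreateTableProcedure that just finished was triggered by a client request to create testExportExpiredSnapshot with a single 'cf' family and two regions split at '1'. A minimal sketch of an equivalent client call follows; it is not the test's own code, and the class name is made up, but the table name, family name, and split key come from the log records above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
          // One split key yields the two regions ('' .. '1' and '1' .. '') assigned above.
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
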
2024-11-21T22:04:10,565 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:10,570 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='07bf1b5e8eb5c464a93b64fcb11da3c42', locateType=CURRENT is [region=testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:04:10,571 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1316f0f1fdc053acc9bab554e250bd2f7', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:10,573 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='2194cb76ff5d83becaa77d5d5e2369cd0', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:10,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:10,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:10,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:10,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-21T22:04:10,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
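
The "writing data to region ... with WAL disabled" warnings just above are what a region server logs when a client issues mutations with durability SKIP_WAL. A hedged sketch of such a write is below; the row key, qualifier, and value are placeholders invented for illustration, while the table and family names come from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"))                // placeholder row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // family from the log, qualifier made up
                  Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // produces the "WAL disabled" warning seen above
          table.put(put);
        }
      }
    }

Skipping the WAL trades crash safety for write speed, which is why the server logs that data may be lost; it is typically only acceptable for reloadable test or bulk data.
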
2024-11-21T22:04:10,582 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:10,584 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:10,589 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T22:04:10,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-21T22:04:10,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T22:04:10,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:10,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d76676, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:10,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:10,596 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e632500, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:10,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,597 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55380, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:10,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13b244a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:10,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:10,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:10,599 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:10,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:10,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:10,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,600 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:10,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ddca589, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:10,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:10,602 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:10,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:10,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:10,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10d9940a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:10,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:10,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,603 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55390, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:10,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@476c5200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:10,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:10,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:10,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:10,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:04:10,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:10,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:10,608 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:10,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:10,610 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:10,610 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T22:04:10,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-21T22:04:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-21T22:04:10,613 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:10,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T22:04:10,613 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:10,616 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:10,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742160_1336 (size=152) 2024-11-21T22:04:10,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742160_1336 (size=152) 2024-11-21T22:04:10,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742160_1336 (size=152) 2024-11-21T22:04:10,622 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:10,622 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7a1444316755d092bee2d182dde7a0c}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c3e82354b20e9e6eb82600246f4d616}] 2024-11-21T22:04:10,623 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,623 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T22:04:10,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-21T22:04:10,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-21T22:04:10,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:10,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
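The procedure log above shows the parent SnapshotProcedure (pid=147) fanning out into one SnapshotRegionProcedure per region (pid=148 for f7a1444316755d092bee2d182dde7a0c, pid=149 for 7c3e82354b20e9e6eb82600246f4d616) and only advancing once both children report success. The sketch below illustrates that fan-out/fan-in shape with plain CompletableFuture; it is not the ProcedureV2 framework, and the snapshotRegion() body is a placeholder assumption.

    // Rough fan-out/fan-in sketch of "one SnapshotRegionProcedure per region".
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class SnapshotFanOut {
        static void snapshotRegion(String encodedRegionName) {
            // Placeholder for flushing the region and recording its HFile references.
            System.out.println("snapshotting region " + encodedRegionName);
        }

        public static void main(String[] args) {
            List<String> regions = List.of(
                    "f7a1444316755d092bee2d182dde7a0c",   // pid=148 in the log
                    "7c3e82354b20e9e6eb82600246f4d616");  // pid=149 in the log

            ExecutorService pool = Executors.newFixedThreadPool(regions.size());
            CompletableFuture<?>[] children = regions.stream()
                    .map(r -> CompletableFuture.runAsync(() -> snapshotRegion(r), pool))
                    .toArray(CompletableFuture[]::new);

            // The parent step (consolidate/verify the snapshot) only runs after
            // every per-region child has completed, as in the procedure log above.
            CompletableFuture.allOf(children).join();
            System.out.println("all region snapshots finished; consolidating manifest");
            pool.shutdown();
        }
    }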
2024-11-21T22:04:10,775 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing f7a1444316755d092bee2d182dde7a0c 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-21T22:04:10,775 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing 7c3e82354b20e9e6eb82600246f4d616 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-21T22:04:10,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/.tmp/cf/5e9b1cca72074d31b130b87940425795 is 71, key is 15ede533b5b26ce5eb92fcb8e62c462a/cf:q/1732226650578/Put/seqid=0 2024-11-21T22:04:10,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/.tmp/cf/5b3bd01fc7fb4b00b60a46da4b6ae047 is 71, key is 028b04299ee1555c8eb755cdf9a374c9/cf:q/1732226650577/Put/seqid=0 2024-11-21T22:04:10,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742161_1337 (size=8190) 2024-11-21T22:04:10,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742162_1338 (size=5422) 2024-11-21T22:04:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742162_1338 (size=5422) 2024-11-21T22:04:10,795 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/.tmp/cf/5b3bd01fc7fb4b00b60a46da4b6ae047 2024-11-21T22:04:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742162_1338 (size=5422) 2024-11-21T22:04:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742161_1337 (size=8190) 2024-11-21T22:04:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742161_1337 (size=8190) 2024-11-21T22:04:10,795 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/.tmp/cf/5e9b1cca72074d31b130b87940425795 2024-11-21T22:04:10,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/.tmp/cf/5e9b1cca72074d31b130b87940425795 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/cf/5e9b1cca72074d31b130b87940425795 2024-11-21T22:04:10,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/.tmp/cf/5b3bd01fc7fb4b00b60a46da4b6ae047 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/cf/5b3bd01fc7fb4b00b60a46da4b6ae047 2024-11-21T22:04:10,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/cf/5e9b1cca72074d31b130b87940425795, entries=45, sequenceid=5, filesize=8.0 K 2024-11-21T22:04:10,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 7c3e82354b20e9e6eb82600246f4d616 in 32ms, sequenceid=5, compaction requested=false 2024-11-21T22:04:10,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-21T22:04:10,808 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/cf/5b3bd01fc7fb4b00b60a46da4b6ae047, entries=5, sequenceid=5, filesize=5.3 K 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for 7c3e82354b20e9e6eb82600246f4d616: 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. for snapshot-testExportExpiredSnapshot completed. 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/cf/5e9b1cca72074d31b130b87940425795] hfiles 2024-11-21T22:04:10,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/cf/5e9b1cca72074d31b130b87940425795 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,809 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for f7a1444316755d092bee2d182dde7a0c in 34ms, sequenceid=5, compaction requested=false 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for f7a1444316755d092bee2d182dde7a0c: 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. for snapshot-testExportExpiredSnapshot completed. 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/cf/5b3bd01fc7fb4b00b60a46da4b6ae047] hfiles 2024-11-21T22:04:10,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/cf/5b3bd01fc7fb4b00b60a46da4b6ae047 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742163_1339 (size=103) 2024-11-21T22:04:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742164_1340 (size=103) 2024-11-21T22:04:10,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742163_1339 (size=103) 2024-11-21T22:04:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742164_1340 (size=103) 2024-11-21T22:04:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742164_1340 (size=103) 2024-11-21T22:04:10,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742163_1339 (size=103) 2024-11-21T22:04:10,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:04:10,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
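As the SnapshotManifest entries above show, the snapshot does not copy the flushed HFiles; it stores each region's region-info plus references to that region's store files ("Adding reference for file (1/1)"). The sketch below lists one region's column-family files with the Hadoop FileSystem API and records their names, just to illustrate the reference idea; the paths are taken from the log, but writing names to a plain text file is an assumption and not the real manifest format.

    // Minimal sketch of "creating references for hfiles": record store-file names,
    // do not copy the data.
    import java.io.BufferedWriter;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class HFileReferenceLister {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);

            // Column-family directory of one snapshotted region (from the log).
            Path cfDir = new Path("/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90"
                    + "/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/cf");
            // Hypothetical location for the recorded references.
            Path refsFile = new Path("/tmp/snapshot-testExportExpiredSnapshot.refs");

            try (BufferedWriter out = new BufferedWriter(
                    new OutputStreamWriter(fs.create(refsFile, true), StandardCharsets.UTF_8))) {
                for (FileStatus hfile : fs.listStatus(cfDir)) {
                    // Record name + length only; the HFile itself stays where it is.
                    out.write(hfile.getPath().getName() + " " + hfile.getLen());
                    out.newLine();
                }
            }
        }
    }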
2024-11-21T22:04:10,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-21T22:04:10,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-21T22:04:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-21T22:04:10,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-21T22:04:10,815 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,815 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:04:10,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:04:10,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c3e82354b20e9e6eb82600246f4d616 in 194 msec 2024-11-21T22:04:10,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=148, resume processing ppid=147 2024-11-21T22:04:10,818 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:10,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7a1444316755d092bee2d182dde7a0c in 194 msec 2024-11-21T22:04:10,819 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:10,819 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:10,819 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,820 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742165_1341 (size=609) 2024-11-21T22:04:10,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742165_1341 (size=609) 2024-11-21T22:04:10,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742165_1341 (size=609) 2024-11-21T22:04:10,833 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:10,837 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:10,838 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-21T22:04:10,839 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:10,839 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-21T22:04:10,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 228 msec 2024-11-21T22:04:10,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T22:04:10,932 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-21T22:04:11,860 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0006_000001 (auth:SIMPLE) from 127.0.0.1:57208 2024-11-21T22:04:11,871 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000001/launch_container.sh] 2024-11-21T22:04:11,871 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000001/container_tokens] 2024-11-21T22:04:11,871 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0006/container_1732226522247_0006_01_000001/sysfs] 2024-11-21T22:04:13,458 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:14,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-21T22:04:14,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-21T22:04:14,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-21T22:04:14,424 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-21T22:04:14,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T22:04:14,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T22:04:19,927 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:20,944 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226660944 2024-11-21T22:04:20,944 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226660944, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226660944, srcFsUri=hdfs://localhost:40237, 
srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:20,976 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:20,976 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226660944, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226660944/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-21T22:04:20,979 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:04:20,980 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
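The SnapshotTTLExpiredException above is the expected outcome of this test: the snapshot was taken with ttl=10 (seconds) around 22:04:10 and the export's pre-flight verification ran at about 22:04:20.98, just past that window, so ExportSnapshot refuses to export it. The sketch below shows the expiry test in its simplest form (a positive TTL and creationTime + TTL already elapsed), using the log's approximate timestamps as a check; the helper name and exact rule are modeled on the log, not copied from HBase's code.

    // Hedged sketch of the TTL check that trips here.
    import java.time.Instant;

    public final class SnapshotTtl {
        /** ttlSeconds <= 0 is treated as "never expires". */
        static boolean isExpired(long creationTimeMillis, long ttlSeconds, long nowMillis) {
            if (ttlSeconds <= 0) {
                return false;
            }
            return creationTimeMillis + ttlSeconds * 1000L <= nowMillis;
        }

        public static void main(String[] args) {
            // Approximate times from the log: snapshot finished ~22:04:10.84,
            // export verification ran at ~22:04:20.98 — roughly 10.1 s later.
            long created = Instant.parse("2024-11-21T22:04:10.840Z").toEpochMilli();
            long exportAttempt = Instant.parse("2024-11-21T22:04:20.980Z").toEpochMilli();
            System.out.println(isExpired(created, 10, exportAttempt));  // true -> export rejected
        }
    }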
2024-11-21T22:04:20,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-21T22:04:20,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:20,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T22:04:20,984 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226660984"}]},"ts":"1732226660984"} 2024-11-21T22:04:20,986 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-21T22:04:20,986 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-21T22:04:20,987 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-21T22:04:20,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, UNASSIGN}] 2024-11-21T22:04:20,989 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, UNASSIGN 2024-11-21T22:04:20,989 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, UNASSIGN 2024-11-21T22:04:20,990 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=c6db3138bf06c444c5026f02ba852b6c, regionState=CLOSING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:20,990 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=43f6c24386ba68b0a0ec27ead216b41b, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:20,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, UNASSIGN because future has completed 2024-11-21T22:04:20,993 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:20,993 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:20,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, UNASSIGN because future has completed 2024-11-21T22:04:20,994 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:20,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:04:21,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T22:04:21,146 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:21,146 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 43f6c24386ba68b0a0ec27ead216b41b, disabling compactions & flushes 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing c6db3138bf06c444c5026f02ba852b6c, disabling compactions & flushes 2024-11-21T22:04:21,146 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:21,146 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 
2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. after waiting 0 ms 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. after waiting 0 ms 2024-11-21T22:04:21,146 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 2024-11-21T22:04:21,153 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:21,153 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:21,153 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b. 2024-11-21T22:04:21,153 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 43f6c24386ba68b0a0ec27ead216b41b: Waiting for close lock at 1732226661146Running coprocessor pre-close hooks at 1732226661146Disabling compacts and flushes for region at 1732226661146Disabling writes for close at 1732226661146Writing region close event to WAL at 1732226661147 (+1 ms)Running coprocessor post-close hooks at 1732226661153 (+6 ms)Closed at 1732226661153 2024-11-21T22:04:21,154 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:21,155 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:21,155 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c. 
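The "Region close journal" entries above concatenate each phase of the close with its absolute timestamp and, when a phase took measurable time, the delta since the previous phase ("(+1 ms)", "(+6 ms)"). The sketch below reproduces that rendering so the journal lines are easier to read; the class and method names are hypothetical, not HBase's.

    // Small sketch of the close-journal format seen above.
    import java.util.ArrayList;
    import java.util.List;

    public final class CloseJournal {
        private record Event(String what, long atMillis) {}
        private final List<Event> events = new ArrayList<>();

        void record(String what, long atMillis) {
            events.add(new Event(what, atMillis));
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < events.size(); i++) {
                Event e = events.get(i);
                sb.append(e.what()).append(" at ").append(e.atMillis());
                if (i > 0) {
                    long delta = e.atMillis() - events.get(i - 1).atMillis();
                    if (delta > 0) {
                        sb.append(" (+").append(delta).append(" ms)");
                    }
                }
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            CloseJournal j = new CloseJournal();
            j.record("Waiting for close lock", 1732226661146L);
            j.record("Disabling writes for close", 1732226661146L);
            j.record("Writing region close event to WAL", 1732226661147L);
            j.record("Closed", 1732226661153L);
            System.out.println(j);  // renders in the same style as the log's journal
        }
    }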
2024-11-21T22:04:21,155 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for c6db3138bf06c444c5026f02ba852b6c: Waiting for close lock at 1732226661146Running coprocessor pre-close hooks at 1732226661146Disabling compacts and flushes for region at 1732226661146Disabling writes for close at 1732226661146Writing region close event to WAL at 1732226661147 (+1 ms)Running coprocessor post-close hooks at 1732226661155 (+8 ms)Closed at 1732226661155 2024-11-21T22:04:21,163 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:21,164 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=43f6c24386ba68b0a0ec27ead216b41b, regionState=CLOSED 2024-11-21T22:04:21,165 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:21,165 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=c6db3138bf06c444c5026f02ba852b6c, regionState=CLOSED 2024-11-21T22:04:21,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:21,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:04:21,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-21T22:04:21,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 43f6c24386ba68b0a0ec27ead216b41b, server=2a884b411602,44629,1732226515126 in 176 msec 2024-11-21T22:04:21,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=43f6c24386ba68b0a0ec27ead216b41b, UNASSIGN in 183 msec 2024-11-21T22:04:21,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-21T22:04:21,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure c6db3138bf06c444c5026f02ba852b6c, server=2a884b411602,36419,1732226515183 in 176 msec 2024-11-21T22:04:21,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=151 2024-11-21T22:04:21,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c6db3138bf06c444c5026f02ba852b6c, UNASSIGN in 184 msec 2024-11-21T22:04:21,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-21T22:04:21,175 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 187 msec 2024-11-21T22:04:21,177 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226661177"}]},"ts":"1732226661177"} 2024-11-21T22:04:21,179 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-21T22:04:21,179 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-21T22:04:21,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 200 msec 2024-11-21T22:04:21,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T22:04:21,302 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T22:04:21,303 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,305 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,306 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,309 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,312 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:21,312 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:21,314 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf, FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/recovered.edits] 2024-11-21T22:04:21,315 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/recovered.edits] 2024-11-21T22:04:21,319 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/cf/9a67dcff41fe48e888c660d99a89ecd9 2024-11-21T22:04:21,320 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/cf/ed7693ec69fc4a3d9535db5804a8ece3 2024-11-21T22:04:21,324 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c/recovered.edits/9.seqid 2024-11-21T22:04:21,324 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/c6db3138bf06c444c5026f02ba852b6c 2024-11-21T22:04:21,324 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b/recovered.edits/9.seqid 2024-11-21T22:04:21,325 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportExpiredSnapshot/43f6c24386ba68b0a0ec27ead216b41b 2024-11-21T22:04:21,325 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-21T22:04:21,328 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,332 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,332 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T22:04:21,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T22:04:21,334 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-21T22:04:21,338 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-21T22:04:21,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,340 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,340 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,340 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,340 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
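[Editor's note] The records above trace pid=156, a DeleteTableProcedure for testtb-testExportExpiredSnapshot: region files are moved into the archive/ tree by HFileArchiver, the rows are removed from hbase:meta, and the table's /hbase/acl znode is deleted (which the ZKPermissionWatcher threads then pick up). On the client side this whole procedure is normally triggered by disabling and then deleting the table through the Admin API. A minimal sketch, assuming a reachable cluster configuration on the classpath; the class name is mine, the table name is taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
          if (admin.tableExists(tn)) {
            if (!admin.isTableDisabled(tn)) {
              admin.disableTable(tn);   // regions are unassigned before deletion
            }
            // the master then runs a DeleteTableProcedure like pid=156 above:
            // archive HFiles, remove META rows, drop the table descriptor and ACLs
            admin.deleteTable(tn);
          }
        }
      }
    }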
2024-11-21T22:04:21,341 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-11-21T22:04:21,341 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T22:04:21,341 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226661340"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:21,341 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226661340"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:21,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,342 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-11-21T22:04:21,342 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T22:04:21,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-21T22:04:21,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,346 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:04:21,346 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c6db3138bf06c444c5026f02ba852b6c, NAME => 'testtb-testExportExpiredSnapshot,,1732226648556.c6db3138bf06c444c5026f02ba852b6c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 43f6c24386ba68b0a0ec27ead216b41b, NAME => 'testtb-testExportExpiredSnapshot,1,1732226648556.43f6c24386ba68b0a0ec27ead216b41b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:04:21,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,346 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-21T22:04:21,346 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226661346"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:21,349 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-21T22:04:21,350 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 47 msec 2024-11-21T22:04:21,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-21T22:04:21,451 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-21T22:04:21,452 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T22:04:21,467 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-21T22:04:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-21T22:04:21,472 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-21T22:04:21,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-21T22:04:21,476 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-21T22:04:21,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-21T22:04:21,516 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=799 (was 803), OpenFileDescriptor=793 (was 818), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=482 (was 527), ProcessCount=14 (was 16), AvailableMemoryMB=1115 (was 846) - AvailableMemoryMB LEAK? 
- 2024-11-21T22:04:21,516 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-21T22:04:21,534 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=799, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=482, ProcessCount=13, AvailableMemoryMB=1113 2024-11-21T22:04:21,534 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-21T22:04:21,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:04:21,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:21,538 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:04:21,538 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:21,538 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-21T22:04:21,539 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:04:21,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T22:04:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742166_1342 (size=412) 2024-11-21T22:04:21,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742166_1342 (size=412) 2024-11-21T22:04:21,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742166_1342 (size=412) 2024-11-21T22:04:21,546 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 508873bce77c67f9192b51ca526a8b8e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:21,546 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b07b5ae35da061b2e1421edffef76f55, NAME => 'testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:21,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742167_1343 (size=73) 2024-11-21T22:04:21,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742167_1343 (size=73) 2024-11-21T22:04:21,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742167_1343 (size=73) 2024-11-21T22:04:21,557 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:21,557 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 508873bce77c67f9192b51ca526a8b8e, disabling compactions & flushes 2024-11-21T22:04:21,557 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,557 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,557 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. after waiting 0 ms 2024-11-21T22:04:21,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,558 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
2024-11-21T22:04:21,558 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 508873bce77c67f9192b51ca526a8b8e: Waiting for close lock at 1732226661557Disabling compacts and flushes for region at 1732226661557Disabling writes for close at 1732226661557Writing region close event to WAL at 1732226661558 (+1 ms)Closed at 1732226661558 2024-11-21T22:04:21,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742168_1344 (size=73) 2024-11-21T22:04:21,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742168_1344 (size=73) 2024-11-21T22:04:21,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742168_1344 (size=73) 2024-11-21T22:04:21,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:21,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing b07b5ae35da061b2e1421edffef76f55, disabling compactions & flushes 2024-11-21T22:04:21,559 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:21,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:21,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. after waiting 0 ms 2024-11-21T22:04:21,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:21,560 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 
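[Editor's note] The records around here belong to pid=157, a CreateTableProcedure for testtb-testEmptyExportFileSystemState: two regions are laid out, split at row key '1', each with a single 'cf' family (VERSIONS => '1', REGION_REPLICATION => '1'). A hedged sketch of how a client typically requests such a table; the class name is mine, the table/family names and split key mirror the log, and only the non-default attributes shown above are set:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)                                     // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                                       // VERSIONS => '1'
                  .build());
          byte[][] splitKeys = { Bytes.toBytes("1") };                     // regions ['', '1') and ['1', '')
          admin.createTable(table.build(), splitKeys);
        }
      }
    }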
2024-11-21T22:04:21,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for b07b5ae35da061b2e1421edffef76f55: Waiting for close lock at 1732226661559Disabling compacts and flushes for region at 1732226661559Disabling writes for close at 1732226661559Writing region close event to WAL at 1732226661560 (+1 ms)Closed at 1732226661560 2024-11-21T22:04:21,560 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:04:21,561 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732226661561"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226661561"}]},"ts":"1732226661561"} 2024-11-21T22:04:21,561 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732226661561"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226661561"}]},"ts":"1732226661561"} 2024-11-21T22:04:21,563 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:04:21,564 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:04:21,564 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226661564"}]},"ts":"1732226661564"} 2024-11-21T22:04:21,565 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-21T22:04:21,565 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:04:21,567 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:04:21,567 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:04:21,567 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:04:21,567 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:04:21,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, ASSIGN}] 2024-11-21T22:04:21,569 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, ASSIGN 2024-11-21T22:04:21,569 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, ASSIGN 2024-11-21T22:04:21,570 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:04:21,570 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:04:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T22:04:21,720 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
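[Editor's note] Pids 158/159 above are TransitRegionStateProcedures: the balancer picks target servers (2a884b411602,41399 and 2a884b411602,36419) and the regions move OFFLINE -> OPENING -> OPEN. From the client side the resulting placement can be inspected with a RegionLocator; a small sketch, assuming an already-open Connection as in the earlier sketches (class and method names are mine):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowAssignments {
      // prints encoded region name -> hosting server, mirroring the regionLocation values logged above
      static void printAssignments(Connection conn, String table) throws java.io.IOException {
        try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(table))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }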
2024-11-21T22:04:21,721 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=b07b5ae35da061b2e1421edffef76f55, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:21,721 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=508873bce77c67f9192b51ca526a8b8e, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:21,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, ASSIGN because future has completed 2024-11-21T22:04:21,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:04:21,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, ASSIGN because future has completed 2024-11-21T22:04:21,724 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:04:21,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T22:04:21,877 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:21,878 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => b07b5ae35da061b2e1421edffef76f55, NAME => 'testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:04:21,878 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. service=AccessControlService 2024-11-21T22:04:21,878 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:04:21,878 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,879 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:21,879 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,879 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,879 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,879 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => 508873bce77c67f9192b51ca526a8b8e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:04:21,880 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. service=AccessControlService 2024-11-21T22:04:21,880 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
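[Editor's note] Each open-region record above registers the AccessControlService coprocessor (AccessController) on the new region; the "jenkins: RWXCA" entry written to hbase:acl a little further down is the kind of grant that client code usually issues through AccessControlClient. A hedged sketch under that assumption; user and table names come from the log, the class name is mine, and note that AccessControlClient.grant declares `throws Throwable`:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      // grants Read/Write/Exec/Create/Admin (RWXCA) on the whole table to user "jenkins"
      static void grantAll(Connection conn, String table) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf(table), "jenkins",
            null, null,   // null family/qualifier = table-wide grant
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }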
2024-11-21T22:04:21,880 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,880 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:21,880 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,880 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,882 INFO [StoreOpener-b07b5ae35da061b2e1421edffef76f55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,882 INFO [StoreOpener-508873bce77c67f9192b51ca526a8b8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,883 INFO [StoreOpener-b07b5ae35da061b2e1421edffef76f55-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b07b5ae35da061b2e1421edffef76f55 columnFamilyName cf 2024-11-21T22:04:21,883 INFO [StoreOpener-508873bce77c67f9192b51ca526a8b8e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 508873bce77c67f9192b51ca526a8b8e columnFamilyName cf 2024-11-21T22:04:21,883 DEBUG [StoreOpener-b07b5ae35da061b2e1421edffef76f55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:21,883 DEBUG [StoreOpener-508873bce77c67f9192b51ca526a8b8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:21,884 INFO [StoreOpener-508873bce77c67f9192b51ca526a8b8e-1 {}] regionserver.HStore(327): Store=508873bce77c67f9192b51ca526a8b8e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:21,884 INFO [StoreOpener-b07b5ae35da061b2e1421edffef76f55-1 {}] regionserver.HStore(327): Store=b07b5ae35da061b2e1421edffef76f55/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:21,884 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,884 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,885 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,887 DEBUG 
[RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,887 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,888 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:21,888 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:21,889 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened b07b5ae35da061b2e1421edffef76f55; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69292755, jitterRate=0.03254251182079315}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:21,889 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened 508873bce77c67f9192b51ca526a8b8e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62622895, jitterRate=-0.06684614717960358}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:21,889 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:21,889 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:21,889 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for 508873bce77c67f9192b51ca526a8b8e: Running coprocessor pre-open hook at 1732226661880Writing region info on filesystem at 1732226661880Initializing all the Stores at 1732226661881 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226661881Cleaning up temporary data from old regions at 1732226661885 (+4 ms)Running coprocessor post-open hooks at 1732226661889 (+4 ms)Region opened successfully at 1732226661889 2024-11-21T22:04:21,889 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for b07b5ae35da061b2e1421edffef76f55: Running coprocessor pre-open hook at 1732226661879Writing 
region info on filesystem at 1732226661879Initializing all the Stores at 1732226661880 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226661880Cleaning up temporary data from old regions at 1732226661885 (+5 ms)Running coprocessor post-open hooks at 1732226661889 (+4 ms)Region opened successfully at 1732226661889 2024-11-21T22:04:21,890 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., pid=161, masterSystemTime=1732226661875 2024-11-21T22:04:21,890 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55., pid=160, masterSystemTime=1732226661874 2024-11-21T22:04:21,892 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,892 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:21,892 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=508873bce77c67f9192b51ca526a8b8e, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:21,892 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:21,892 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 
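[Editor's note] Once both regions report "Opened" and their hbase:meta rows flip to OPEN, the test harness (per the HBaseTestingUtil lines further down) waits until all regions of the table are assigned. A plain client can approximate that wait with Admin#isTableAvailable; a simple polling sketch, assuming an Admin handle as in the earlier sketches (class, method, and timeout handling are mine):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForTableSketch {
      // crude polling loop; HBaseTestingUtil does a richer check against hbase:meta and the AssignmentManager
      static void waitUntilAvailable(Admin admin, String table, long timeoutMs) throws Exception {
        TableName tn = TableName.valueOf(table);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(tn)) {
          if (System.currentTimeMillis() > deadline) {
            throw new java.util.concurrent.TimeoutException("table not available: " + table);
          }
          Thread.sleep(200);
        }
      }
    }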
2024-11-21T22:04:21,893 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=b07b5ae35da061b2e1421edffef76f55, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:21,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:04:21,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:04:21,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=159 2024-11-21T22:04:21,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979 in 172 msec 2024-11-21T22:04:21,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, ASSIGN in 330 msec 2024-11-21T22:04:21,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=158 2024-11-21T22:04:21,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183 in 174 msec 2024-11-21T22:04:21,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-11-21T22:04:21,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, ASSIGN in 331 msec 2024-11-21T22:04:21,900 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:04:21,901 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226661900"}]},"ts":"1732226661900"} 2024-11-21T22:04:21,902 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-21T22:04:21,903 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:04:21,903 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-21T22:04:21,906 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:04:21,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,948 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:21,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:21,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 421 msec 2024-11-21T22:04:22,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T22:04:22,164 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T22:04:22,164 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-21T22:04:22,164 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:22,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-21T22:04:22,172 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:22,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-21T22:04:22,173 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:04:22,177 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:04:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226662177 (current time:1732226662177). 
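[Editor's note] The master record above shows the snapshot request { ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }. From a client that is a single Admin call; a sketch, assuming the two-argument overload (which takes a FLUSH-type snapshot of an enabled table, matching "type=FLUSH" above) and a class name of my own:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class SnapshotSketch {
      static void takeSnapshot(Admin admin) throws Exception {
        admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
            TableName.valueOf("testtb-testEmptyExportFileSystemState"));
        // cleanup mirrors the earlier "Deleting snapshot:" records, e.g.
        // admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      }
    }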
2024-11-21T22:04:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-21T22:04:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4431057a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:22,184 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:22,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:22,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:22,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45496398, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:22,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:22,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,186 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:22,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695a90fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:22,189 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:22,190 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,191 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:22,193 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:04:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,193 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2289e0c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:22,200 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:22,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:22,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:22,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5db41f90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:22,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:22,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,203 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34478, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:22,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fb4cbd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:22,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:22,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:04:22,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:22,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57832, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:22,214 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:04:22,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:22,214 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:04:22,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:22,216 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:22,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:04:22,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-21T22:04:22,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T22:04:22,220 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:22,221 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:22,228 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742169_1345 (size=185) 2024-11-21T22:04:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742169_1345 (size=185) 2024-11-21T22:04:22,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742169_1345 (size=185) 2024-11-21T22:04:22,270 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:22,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e}] 2024-11-21T22:04:22,272 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:22,273 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:22,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T22:04:22,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-21T22:04:22,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-21T22:04:22,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for 508873bce77c67f9192b51ca526a8b8e: 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 
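Annotation: the "Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE" entry and the SNAPSHOT_* state transitions above are the master-side SnapshotProcedure that a single blocking Admin call kicks off on the client. A minimal sketch of that call is shown below, assuming a connection to this cluster; the class name is hypothetical, while the snapshot and table names are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Blocks until the SnapshotProcedure (SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION) completes.
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"));
        }
      }
    }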
2024-11-21T22:04:22,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for b07b5ae35da061b2e1421edffef76f55: 2024-11-21T22:04:22,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-21T22:04:22,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:22,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:22,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742170_1346 (size=76) 2024-11-21T22:04:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742170_1346 (size=76) 2024-11-21T22:04:22,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742171_1347 (size=76) 2024-11-21T22:04:22,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742170_1346 (size=76) 2024-11-21T22:04:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
2024-11-21T22:04:22,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-21T22:04:22,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-21T22:04:22,457 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:22,457 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:22,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742171_1347 (size=76) 2024-11-21T22:04:22,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742171_1347 (size=76) 2024-11-21T22:04:22,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:22,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-21T22:04:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-21T22:04:22,459 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:22,460 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:22,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e in 190 msec 2024-11-21T22:04:22,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-21T22:04:22,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 in 191 msec 2024-11-21T22:04:22,464 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:22,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:22,467 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:22,467 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:22,470 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:22,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742172_1348 (size=567) 2024-11-21T22:04:22,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742172_1348 (size=567) 2024-11-21T22:04:22,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742172_1348 (size=567) 2024-11-21T22:04:22,539 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:22,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T22:04:22,547 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:22,547 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:22,552 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:22,552 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-21T22:04:22,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ 
ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 337 msec 2024-11-21T22:04:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T22:04:22,851 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T22:04:22,858 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0e5e1e27261576ae23c11130d080a13da', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:22,864 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='13e7032f60be3b5c010f6987e5ea314b1', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,866 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2ba08154ac0159aadae145d6c97de7db5', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,866 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='480e4606a0232759fc0606255705ee0ef', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,868 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3255182f9e1330671e988cd1a74dc1a84', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. with WAL disabled. Data may be lost in the event of a crash. 
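Annotation: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above come from the test loading rows with write-ahead logging turned off on each mutation. A sketch of a single such write is below; the row key and value are hypothetical, while the column family 'cf' and qualifier 'q' match the HFile keys that appear later in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(
                 TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                       // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          put.setDurability(Durability.SKIP_WAL);                          // triggers the WAL-disabled warning above
          table.put(put);
        }
      }
    }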
2024-11-21T22:04:22,872 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='62b3d90c7cd09edd7004793921ad616d6', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='51b3b7dd3a31aac80c0ea65f829184b2f', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='d386c40aa2338ece49d468b5a629adc5', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:22,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:22,879 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:04:22,882 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-21T22:04:22,882 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:22,882 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:22,884 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:04:22,890 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:04:22,898 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T22:04:22,902 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:04:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226662902 (current time:1732226662902). 
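Annotation: the repeated "Checking to see if procedure is done pid=..." entries are the client polling the master for snapshot completion. With the asynchronous Admin API the same pattern looks roughly like the sketch below; the SnapshotDescription arguments are assumed from the FLUSH-type snapshot named in the request above, and the class name is hypothetical.

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class AsyncSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          SnapshotDescription snapshot = new SnapshotDescription(
              "snaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
          // Returns immediately; the master keeps running the SnapshotProcedure in the background.
          Future<Void> done = admin.snapshotAsync(snapshot);
          done.get();  // waits for completion, analogous to the "is procedure done" polling in the log
        }
      }
    }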
2024-11-21T22:04:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-21T22:04:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3adb936, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:22,906 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:22,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:22,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:22,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1175501, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:22,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:22,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,908 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:22,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ec75b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:22,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:22,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:22,912 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:04:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,912 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bcfbd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:22,914 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:22,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:22,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:22,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dbb2f0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:22,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:22,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,916 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:22,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c56c317, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:22,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:22,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
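Annotation: each "ipc.AbstractRpcClient(198)" line above echoes the client socket settings in effect (connectTO=10000, readTO=20000, writeTO=60000, which are the defaults). If those need adjusting, they can be set on the Configuration before the connection is created; the key names below are assumed from the AbstractRpcClient settings echoed in the log rather than taken from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcSocketTimeoutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);  // connectTO in the log
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);     // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);    // writeTO
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connection created with explicit rpc socket timeouts");
        }
      }
    }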
2024-11-21T22:04:22,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:22,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:22,922 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:22,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:04:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:22,923 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:22,923 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:22,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T22:04:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:22,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T22:04:22,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-21T22:04:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T22:04:22,929 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:22,930 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:22,932 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:22,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742173_1349 (size=180) 2024-11-21T22:04:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742173_1349 (size=180) 2024-11-21T22:04:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742173_1349 (size=180) 2024-11-21T22:04:22,956 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:22,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e}] 2024-11-21T22:04:22,957 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:22,958 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:23,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:04:23,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T22:04:23,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-21T22:04:23,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:23,111 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing b07b5ae35da061b2e1421edffef76f55 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-21T22:04:23,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-21T22:04:23,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
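Annotation: before each snapshot request proceeds, the master reads the table's ACL entry from hbase:acl ("Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]") so the permissions can be written into the snapshot description. A client-side view of the same permissions can be fetched with AccessControlClient, sketched below under the assumption that the AccessController coprocessor is enabled, as it is in this test; the class name is hypothetical.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class TableAclSketch {
      public static void main(String[] args) throws Throwable {  // AccessControlClient declares Throwable
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          List<UserPermission> perms = AccessControlClient.getUserPermissions(
              connection, "testtb-testEmptyExportFileSystemState");
          perms.forEach(System.out::println);  // expect an entry like "jenkins: RWXCA"
        }
      }
    }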
2024-11-21T22:04:23,113 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing 508873bce77c67f9192b51ca526a8b8e 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-21T22:04:23,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/.tmp/cf/0ab3ac416be049c9b645f01bdd2567c5 is 71, key is 014b2bd561af7612a7f4b3623e978739/cf:q/1732226662871/Put/seqid=0 2024-11-21T22:04:23,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/.tmp/cf/eafc2d9c36794d3ebb43701771770b73 is 71, key is 1a2dae297572505a5134eeb926501af9/cf:q/1732226662877/Put/seqid=0 2024-11-21T22:04:23,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742174_1350 (size=5424) 2024-11-21T22:04:23,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742174_1350 (size=5424) 2024-11-21T22:04:23,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742174_1350 (size=5424) 2024-11-21T22:04:23,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/.tmp/cf/0ab3ac416be049c9b645f01bdd2567c5 2024-11-21T22:04:23,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742175_1351 (size=8190) 2024-11-21T22:04:23,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742175_1351 (size=8190) 2024-11-21T22:04:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742175_1351 (size=8190) 2024-11-21T22:04:23,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/.tmp/cf/0ab3ac416be049c9b645f01bdd2567c5 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5 2024-11-21T22:04:23,194 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/.tmp/cf/eafc2d9c36794d3ebb43701771770b73 2024-11-21T22:04:23,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/.tmp/cf/eafc2d9c36794d3ebb43701771770b73 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73 2024-11-21T22:04:23,203 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5, entries=5, sequenceid=6, filesize=5.3 K 2024-11-21T22:04:23,204 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for b07b5ae35da061b2e1421edffef76f55 in 93ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:23,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-21T22:04:23,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for b07b5ae35da061b2e1421edffef76f55: 2024-11-21T22:04:23,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-21T22:04:23,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:23,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5] hfiles 2024-11-21T22:04:23,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,212 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73, entries=45, sequenceid=6, filesize=8.0 K 2024-11-21T22:04:23,213 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 508873bce77c67f9192b51ca526a8b8e in 101ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for 508873bce77c67f9192b51ca526a8b8e: 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73] hfiles 2024-11-21T22:04:23,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742176_1352 (size=115) 2024-11-21T22:04:23,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742176_1352 (size=115) 2024-11-21T22:04:23,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742176_1352 (size=115) 2024-11-21T22:04:23,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 
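The sequence above is the regionserver side of a FLUSH-type snapshot: each region flushes its memstore (the "Finished flush of dataSize ..." entries), then SnapshotManifest stores the region-info and a reference to every resulting hfile. As a rough client-side counterpart, here is a minimal sketch of requesting such a snapshot through the stock HBase Admin API; only the table and snapshot names are taken from the log, the connection setup is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot first forces each region to flush its memstore,
          // then records references to the flushed hfiles in the manifest,
          // which is what the SnapshotRegionCallable entries above show.
          admin.snapshot("snaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

The blocking snapshot() call returns once the master-side procedure visible in the following entries has completed.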
2024-11-21T22:04:23,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-21T22:04:23,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T22:04:23,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-21T22:04:23,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:23,242 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:23,246 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b07b5ae35da061b2e1421edffef76f55 in 288 msec 2024-11-21T22:04:23,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742177_1353 (size=115) 2024-11-21T22:04:23,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742177_1353 (size=115) 2024-11-21T22:04:23,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742177_1353 (size=115) 2024-11-21T22:04:23,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
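While the master drives the snapshot procedure, the client simply polls ("Checking to see if procedure is done pid=165" in MasterRpcServices). A small, hedged sketch of confirming afterwards that the finished snapshot is visible, using the documented listSnapshots() call (nothing here beyond the standard Admin API is implied by the log):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Once the SnapshotProcedure has reached SNAPSHOT_COMPLETE_SNAPSHOT,
          // the snapshot shows up in the master's snapshot listing.
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription sd : snapshots) {
            System.out.println(sd.getName());
          }
        }
      }
    }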
2024-11-21T22:04:23,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-21T22:04:23,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-21T22:04:23,254 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:23,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:23,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-11-21T22:04:23,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 508873bce77c67f9192b51ca526a8b8e in 304 msec 2024-11-21T22:04:23,263 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:23,265 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:23,266 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:23,266 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,267 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742178_1354 (size=645) 2024-11-21T22:04:23,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742178_1354 (size=645) 2024-11-21T22:04:23,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742178_1354 (size=645) 2024-11-21T22:04:23,289 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:23,298 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:23,299 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,301 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:23,301 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-21T22:04:23,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 376 msec 2024-11-21T22:04:23,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T22:04:23,551 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T22:04:23,552 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552 2024-11-21T22:04:23,552 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:23,592 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:23,592 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,594 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:04:23,598 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:23,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742180_1356 (size=567) 2024-11-21T22:04:23,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742180_1356 (size=567) 2024-11-21T22:04:23,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742180_1356 (size=567) 2024-11-21T22:04:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742179_1355 (size=185) 2024-11-21T22:04:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742179_1355 (size=185) 2024-11-21T22:04:23,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742179_1355 (size=185) 2024-11-21T22:04:23,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:23,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:23,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-21T22:04:24,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-21T22:04:24,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-21T22:04:24,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-17020245588655345355.jar 2024-11-21T22:04:24,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-5934526514219120172.jar 2024-11-21T22:04:24,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:24,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:04:24,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:04:24,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:04:24,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:04:24,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:04:24,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:04:24,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:04:24,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:04:24,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:04:24,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:04:24,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:04:24,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:24,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:24,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-11-21T22:04:24,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:24,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:24,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:04:24,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:04:24,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742181_1357 (size=131440) 2024-11-21T22:04:24,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742181_1357 (size=131440) 2024-11-21T22:04:24,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742181_1357 (size=131440) 2024-11-21T22:04:24,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T22:04:24,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T22:04:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T22:04:24,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T22:04:24,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T22:04:24,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T22:04:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742184_1360 (size=903739) 2024-11-21T22:04:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742184_1360 (size=903739) 2024-11-21T22:04:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742184_1360 (size=903739) 2024-11-21T22:04:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742185_1361 (size=8360083) 
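The long run of "For class X, using jar Y" entries is TableMapReduceUtil resolving, for every class the export job needs, the jar that provides it so the jar can be shipped with the MapReduce job. A minimal sketch of the same mechanism on a plain Hadoop Job (the job name and the printed configuration key are illustrative assumptions, not taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddHBaseDependencyJars {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-deps-demo");
        // Finds the jar backing each required class (HConstants, ClientProtos,
        // zookeeper, opentelemetry, hadoop-common, ...) and registers it with
        // the job's distributed cache, producing the "using jar" entries above.
        TableMapReduceUtil.addDependencyJars(job);
        // Assumed detail: the resolved jars end up under the "tmpjars" key.
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }

Each resolved jar is then written into HDFS for the job, which is what the subsequent burst of addStoredBlock entries corresponds to.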
2024-11-21T22:04:24,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742185_1361 (size=8360083) 2024-11-21T22:04:24,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742185_1361 (size=8360083) 2024-11-21T22:04:24,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742186_1362 (size=6424742) 2024-11-21T22:04:24,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742186_1362 (size=6424742) 2024-11-21T22:04:24,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742186_1362 (size=6424742) 2024-11-21T22:04:24,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742187_1363 (size=1877034) 2024-11-21T22:04:24,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742187_1363 (size=1877034) 2024-11-21T22:04:24,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742187_1363 (size=1877034) 2024-11-21T22:04:24,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742188_1364 (size=77835) 2024-11-21T22:04:24,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742188_1364 (size=77835) 2024-11-21T22:04:24,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742188_1364 (size=77835) 2024-11-21T22:04:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742189_1365 (size=30949) 2024-11-21T22:04:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742189_1365 (size=30949) 2024-11-21T22:04:25,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742189_1365 (size=30949) 2024-11-21T22:04:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742190_1366 (size=1597347) 2024-11-21T22:04:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742190_1366 (size=1597347) 2024-11-21T22:04:25,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742190_1366 (size=1597347) 2024-11-21T22:04:25,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742191_1367 (size=4695811) 2024-11-21T22:04:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742191_1367 (size=4695811) 2024-11-21T22:04:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742191_1367 
(size=4695811) 2024-11-21T22:04:25,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742192_1368 (size=232957) 2024-11-21T22:04:25,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742192_1368 (size=232957) 2024-11-21T22:04:25,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742192_1368 (size=232957) 2024-11-21T22:04:25,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742193_1369 (size=127628) 2024-11-21T22:04:25,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742193_1369 (size=127628) 2024-11-21T22:04:25,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742193_1369 (size=127628) 2024-11-21T22:04:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742194_1370 (size=20406) 2024-11-21T22:04:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742194_1370 (size=20406) 2024-11-21T22:04:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742194_1370 (size=20406) 2024-11-21T22:04:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742195_1371 (size=5175431) 2024-11-21T22:04:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742195_1371 (size=5175431) 2024-11-21T22:04:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742195_1371 (size=5175431) 2024-11-21T22:04:25,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742196_1372 (size=217634) 2024-11-21T22:04:25,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742196_1372 (size=217634) 2024-11-21T22:04:25,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742196_1372 (size=217634) 2024-11-21T22:04:25,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742197_1373 (size=1832290) 2024-11-21T22:04:25,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742197_1373 (size=1832290) 2024-11-21T22:04:25,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742197_1373 (size=1832290) 2024-11-21T22:04:25,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742198_1374 (size=322274) 2024-11-21T22:04:25,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to 
blk_1073742198_1374 (size=322274) 2024-11-21T22:04:25,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742198_1374 (size=322274) 2024-11-21T22:04:25,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742199_1375 (size=503880) 2024-11-21T22:04:25,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742199_1375 (size=503880) 2024-11-21T22:04:25,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742199_1375 (size=503880) 2024-11-21T22:04:25,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742200_1376 (size=440656) 2024-11-21T22:04:25,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742200_1376 (size=440656) 2024-11-21T22:04:25,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742200_1376 (size=440656) 2024-11-21T22:04:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742201_1377 (size=29229) 2024-11-21T22:04:25,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742201_1377 (size=29229) 2024-11-21T22:04:25,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742201_1377 (size=29229) 2024-11-21T22:04:25,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742202_1378 (size=24096) 2024-11-21T22:04:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742202_1378 (size=24096) 2024-11-21T22:04:25,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742202_1378 (size=24096) 2024-11-21T22:04:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742203_1379 (size=111872) 2024-11-21T22:04:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742203_1379 (size=111872) 2024-11-21T22:04:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742203_1379 (size=111872) 2024-11-21T22:04:25,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742204_1380 (size=45609) 2024-11-21T22:04:25,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742204_1380 (size=45609) 2024-11-21T22:04:25,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742204_1380 (size=45609) 2024-11-21T22:04:25,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to 
blk_1073742205_1381 (size=136454) 2024-11-21T22:04:25,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742205_1381 (size=136454) 2024-11-21T22:04:25,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742205_1381 (size=136454) 2024-11-21T22:04:25,640 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:04:25,644 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-21T22:04:25,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742206_1382 (size=7) 2024-11-21T22:04:25,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742206_1382 (size=7) 2024-11-21T22:04:25,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742206_1382 (size=7) 2024-11-21T22:04:26,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742207_1383 (size=10) 2024-11-21T22:04:26,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742207_1383 (size=10) 2024-11-21T22:04:26,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742207_1383 (size=10) 2024-11-21T22:04:26,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742208_1384 (size=303637) 2024-11-21T22:04:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742208_1384 (size=303637) 2024-11-21T22:04:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742208_1384 (size=303637) 2024-11-21T22:04:26,904 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:04:26,904 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:04:26,966 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:27,473 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0007_000001 (auth:SIMPLE) from 127.0.0.1:43050 2024-11-21T22:04:33,078 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0007_000001 (auth:SIMPLE) from 127.0.0.1:49028 2024-11-21T22:04:33,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742209_1385 (size=349263) 2024-11-21T22:04:33,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742209_1385 (size=349263) 2024-11-21T22:04:33,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742209_1385 (size=349263) 2024-11-21T22:04:34,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742210_1386 (size=8568) 2024-11-21T22:04:34,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742210_1386 (size=8568) 2024-11-21T22:04:34,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742210_1386 (size=8568) 2024-11-21T22:04:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742211_1387 (size=460) 2024-11-21T22:04:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742211_1387 (size=460) 2024-11-21T22:04:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742211_1387 (size=460) 2024-11-21T22:04:34,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742212_1388 (size=8568) 2024-11-21T22:04:34,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742212_1388 (size=8568) 2024-11-21T22:04:34,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742212_1388 (size=8568) 2024-11-21T22:04:34,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742213_1389 (size=349263) 2024-11-21T22:04:34,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742213_1389 (size=349263) 2024-11-21T22:04:34,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742213_1389 (size=349263) 2024-11-21T22:04:36,032 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:04:36,034 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
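The export exercised above (load the snapshot's hfile list, run a MapReduce copy, finalize, verify) is the standard ExportSnapshot tool. A minimal sketch of driving it in-process, assuming it is still wired up as a Hadoop Tool the way the test harness uses it; the -snapshot and -copy-to options are the tool's documented ones and the destination path is copied from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //       -snapshot emptySnaptb0-testEmptyExportFileSystemState \
        //       -copy-to <destination HDFS path>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552"
        });
        System.exit(rc);
      }
    }

For an empty snapshot there are no hfiles to copy, so the job only transfers the snapshot manifest, which the "Verify the exported snapshot's expiration status and integrity" step then checks at the destination.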
2024-11-21T22:04:36,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:36,110 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:04:36,111 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:04:36,111 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:36,112 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-21T22:04:36,112 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-21T22:04:36,112 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:36,113 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-21T22:04:36,113 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226663552/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-21T22:04:36,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T22:04:36,128 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226676128"}]},"ts":"1732226676128"} 2024-11-21T22:04:36,130 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-21T22:04:36,131 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-21T22:04:36,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-21T22:04:36,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, UNASSIGN}] 2024-11-21T22:04:36,135 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, UNASSIGN 2024-11-21T22:04:36,136 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, UNASSIGN 2024-11-21T22:04:36,137 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=508873bce77c67f9192b51ca526a8b8e, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:36,137 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b07b5ae35da061b2e1421edffef76f55, regionState=CLOSING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:04:36,140 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=2a884b411602,36419,1732226515183, table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
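The teardown that starts here (DisableTableProcedure, then CloseTableRegionsProcedure and one TransitRegionStateProcedure UNASSIGN per region, followed later by DeleteTableProcedure) is triggered by ordinary Admin calls from the test. A minimal sketch of that client side, with the table and snapshot names taken from the log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // disableTable() drives the DisableTableProcedure shown above:
          // close every region, then mark the table DISABLED in hbase:meta.
          admin.disableTable(tn);
          // deleteTable() removes the table; its hfiles are moved under the
          // archive/ directory by HFileArchiver rather than deleted outright.
          admin.deleteTable(tn);
          // Snapshots outlive the table and must be removed separately.
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
        }
      }
    }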
2024-11-21T22:04:36,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, UNASSIGN because future has completed 2024-11-21T22:04:36,140 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:36,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:04:36,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, UNASSIGN because future has completed 2024-11-21T22:04:36,142 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:04:36,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:04:36,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T22:04:36,293 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:36,294 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:36,294 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing 508873bce77c67f9192b51ca526a8b8e, disabling compactions & flushes 2024-11-21T22:04:36,294 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:36,294 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 2024-11-21T22:04:36,294 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. after waiting 0 ms 2024-11-21T22:04:36,294 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
2024-11-21T22:04:36,296 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:36,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:04:36,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing b07b5ae35da061b2e1421edffef76f55, disabling compactions & flushes 2024-11-21T22:04:36,296 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:36,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:36,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. after waiting 0 ms 2024-11-21T22:04:36,296 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:36,301 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:36,301 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:36,302 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e. 
2024-11-21T22:04:36,302 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for 508873bce77c67f9192b51ca526a8b8e: Waiting for close lock at 1732226676294Running coprocessor pre-close hooks at 1732226676294Disabling compacts and flushes for region at 1732226676294Disabling writes for close at 1732226676294Writing region close event to WAL at 1732226676296 (+2 ms)Running coprocessor post-close hooks at 1732226676301 (+5 ms)Closed at 1732226676302 (+1 ms) 2024-11-21T22:04:36,302 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:04:36,303 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:04:36,303 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55. 2024-11-21T22:04:36,303 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for b07b5ae35da061b2e1421edffef76f55: Waiting for close lock at 1732226676296Running coprocessor pre-close hooks at 1732226676296Disabling compacts and flushes for region at 1732226676296Disabling writes for close at 1732226676296Writing region close event to WAL at 1732226676297 (+1 ms)Running coprocessor post-close hooks at 1732226676303 (+6 ms)Closed at 1732226676303 2024-11-21T22:04:36,304 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed 508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:36,304 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=508873bce77c67f9192b51ca526a8b8e, regionState=CLOSED 2024-11-21T22:04:36,304 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:36,306 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b07b5ae35da061b2e1421edffef76f55, regionState=CLOSED 2024-11-21T22:04:36,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:04:36,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:04:36,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-21T22:04:36,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 508873bce77c67f9192b51ca526a8b8e, server=2a884b411602,41399,1732226514979 in 168 msec 2024-11-21T22:04:36,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-21T22:04:36,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure b07b5ae35da061b2e1421edffef76f55, server=2a884b411602,36419,1732226515183 in 169 msec 2024-11-21T22:04:36,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=508873bce77c67f9192b51ca526a8b8e, UNASSIGN in 178 msec 2024-11-21T22:04:36,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-21T22:04:36,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b07b5ae35da061b2e1421edffef76f55, UNASSIGN in 180 msec 2024-11-21T22:04:36,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-21T22:04:36,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 186 msec 2024-11-21T22:04:36,321 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226676321"}]},"ts":"1732226676321"} 2024-11-21T22:04:36,323 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-21T22:04:36,323 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-21T22:04:36,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 204 msec 2024-11-21T22:04:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T22:04:36,442 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T22:04:36,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,444 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table 
testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,446 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,449 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,467 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:36,469 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:36,469 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/recovered.edits] 2024-11-21T22:04:36,474 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/recovered.edits] 2024-11-21T22:04:36,476 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/cf/eafc2d9c36794d3ebb43701771770b73 2024-11-21T22:04:36,479 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/cf/0ab3ac416be049c9b645f01bdd2567c5 2024-11-21T22:04:36,480 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e/recovered.edits/9.seqid 2024-11-21T22:04:36,481 DEBUG 
[HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/508873bce77c67f9192b51ca526a8b8e 2024-11-21T22:04:36,482 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55/recovered.edits/9.seqid 2024-11-21T22:04:36,483 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testEmptyExportFileSystemState/b07b5ae35da061b2e1421edffef76f55 2024-11-21T22:04:36,483 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-21T22:04:36,486 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,489 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-21T22:04:36,491 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-21T22:04:36,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-21T22:04:36,493 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226676493"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:36,494 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226676493"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:36,497 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:04:36,497 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b07b5ae35da061b2e1421edffef76f55, NAME => 'testtb-testEmptyExportFileSystemState,,1732226661535.b07b5ae35da061b2e1421edffef76f55.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 508873bce77c67f9192b51ca526a8b8e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732226661535.508873bce77c67f9192b51ca526a8b8e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:04:36,497 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
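The DisableTableProcedure (pid=168) and DeleteTableProcedure (pid=174) entries above are the master-side procedures triggered by ordinary admin requests during the test's cleanup. The test drives them through its own utilities, so the following is only a minimal client-side sketch of the equivalent sequence, assuming an HBase 2.x client and a reachable cluster configuration; only the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        // Unassigns the regions and marks the table DISABLED in hbase:meta,
                        // matching the UNASSIGN/CLOSE procedures and "state=DISABLED" entries above.
                        admin.disableTable(table);
                    }
                    // Archives the region directories and removes the meta rows and table descriptor,
                    // matching the DeleteTableProcedure steps above.
                    admin.deleteTable(table);
                }
            }
        }
    }

Both calls block until the corresponding procedure completes, which is what the client-side "Operation: DISABLE ... completed" line above and the "Operation: DELETE ... completed" line further down report.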
2024-11-21T22:04:36,498 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226676497"}]},"ts":"9223372036854775807"} 2024-11-21T22:04:36,504 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-21T22:04:36,505 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 63 msec 2024-11-21T22:04:36,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,513 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T22:04:36,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T22:04:36,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T22:04:36,515 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,526 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:36,526 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:36,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-21T22:04:36,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:36,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:36,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:36,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:36,528 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-21T22:04:36,528 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T22:04:36,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-21T22:04:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:36,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-21T22:04:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-21T22:04:36,609 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=806 (was 799) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:42536 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:49414 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1720914081_1 at /127.0.0.1:47296 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:42873 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:35398 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5421 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 130135) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42873 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:42423 from appattempt_1732226522247_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=818 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=578 (was 482) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=2174 (was 1113) - AvailableMemoryMB LEAK? 
- 2024-11-21T22:04:36,609 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-11-21T22:04:36,648 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=806, OpenFileDescriptor=818, MaxFileDescriptor=1048576, SystemLoadAverage=578, ProcessCount=17, AvailableMemoryMB=2171 2024-11-21T22:04:36,648 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-11-21T22:04:36,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:04:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:04:36,660 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:04:36,660 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:36,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-21T22:04:36,663 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:04:36,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:36,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:36,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742214_1390 (size=404) 2024-11-21T22:04:36,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742214_1390 (size=404) 2024-11-21T22:04:36,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742214_1390 (size=404) 2024-11-21T22:04:36,779 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7fdf59088c34d62eb14bcc2925bef590, NAME => 'testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:36,787 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7ac35ffc60df92b12f6723b1694ed248, NAME => 'testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742215_1391 (size=65) 2024-11-21T22:04:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742215_1391 (size=65) 2024-11-21T22:04:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742215_1391 (size=65) 2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 7fdf59088c34d62eb14bcc2925bef590, disabling compactions & flushes 2024-11-21T22:04:36,813 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. after waiting 0 ms 2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:36,813 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 
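The MasterRpcServices "delete name: ..." and SnapshotManager "Deleting snapshot: ..." entries a little further up remove the two snapshots left over from the export test. A minimal sketch of the equivalent client calls, under the same assumptions as the earlier sketch (HBase 2.x Admin API, cluster config on the classpath); the snapshot names are the ones from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestSnapshots {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Each call shows up in the master log as one "delete name: ..." request.
                admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
                admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
            }
        }
    }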
2024-11-21T22:04:36,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7fdf59088c34d62eb14bcc2925bef590: Waiting for close lock at 1732226676813Disabling compacts and flushes for region at 1732226676813Disabling writes for close at 1732226676813Writing region close event to WAL at 1732226676813Closed at 1732226676813 2024-11-21T22:04:36,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742216_1392 (size=65) 2024-11-21T22:04:36,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742216_1392 (size=65) 2024-11-21T22:04:36,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742216_1392 (size=65) 2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 7ac35ffc60df92b12f6723b1694ed248, disabling compactions & flushes 2024-11-21T22:04:36,824 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. after waiting 0 ms 2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:36,824 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
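The CreateTableProcedure above (pid=175) creates testtb-testExportWithChecksum with a single 'cf' family and one split point, '1', which is why exactly two regions (STARTKEY '' and STARTKEY '1') are initialized here. A sketch of the equivalent client-side call, assuming the HBase 2.x descriptor builder API; only the table name, family name, split key and VERSIONS setting come from the logged descriptor, while the remaining logged attributes appear to be defaults.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateChecksumTable {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testtb-testExportWithChecksum");
            TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)                       // VERSIONS => '1' in the logged descriptor
                    .build())
                .build();
            byte[][] splitKeys = { Bytes.toBytes("1") };     // yields regions ['', '1') and ['1', '')
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Drives the CREATE_TABLE_* states and the two ASSIGN subprocedures seen below.
                admin.createTable(desc, splitKeys);
            }
        }
    }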
2024-11-21T22:04:36,824 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7ac35ffc60df92b12f6723b1694ed248: Waiting for close lock at 1732226676824Disabling compacts and flushes for region at 1732226676824Disabling writes for close at 1732226676824Writing region close event to WAL at 1732226676824Closed at 1732226676824 2024-11-21T22:04:36,826 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:04:36,826 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226676826"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226676826"}]},"ts":"1732226676826"} 2024-11-21T22:04:36,826 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732226676826"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226676826"}]},"ts":"1732226676826"} 2024-11-21T22:04:36,830 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:04:36,832 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:04:36,832 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226676832"}]},"ts":"1732226676832"} 2024-11-21T22:04:36,834 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-21T22:04:36,835 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:04:36,836 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:04:36,836 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:04:36,836 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:04:36,837 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:04:36,837 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:04:36,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, ASSIGN}] 2024-11-21T22:04:36,839 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, ASSIGN 2024-11-21T22:04:36,839 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, ASSIGN 2024-11-21T22:04:36,841 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, ASSIGN; state=OFFLINE, location=2a884b411602,44629,1732226515126; forceNewPlan=false, retain=false 2024-11-21T22:04:36,841 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:04:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:36,991 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-21T22:04:36,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=7fdf59088c34d62eb14bcc2925bef590, regionState=OPENING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:36,992 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=7ac35ffc60df92b12f6723b1694ed248, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:36,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, ASSIGN because future has completed 2024-11-21T22:04:36,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:04:36,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, ASSIGN because future has completed 2024-11-21T22:04:36,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:04:37,150 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:37,150 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 7ac35ffc60df92b12f6723b1694ed248, NAME => 'testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => 7fdf59088c34d62eb14bcc2925bef590, NAME => 'testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. service=AccessControlService 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
service=AccessControlService 2024-11-21T22:04:37,151 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:04:37,151 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,151 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,152 INFO [StoreOpener-7fdf59088c34d62eb14bcc2925bef590-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,152 INFO [StoreOpener-7ac35ffc60df92b12f6723b1694ed248-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,154 INFO [StoreOpener-7ac35ffc60df92b12f6723b1694ed248-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7ac35ffc60df92b12f6723b1694ed248 columnFamilyName cf 2024-11-21T22:04:37,154 DEBUG [StoreOpener-7ac35ffc60df92b12f6723b1694ed248-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:37,155 INFO [StoreOpener-7ac35ffc60df92b12f6723b1694ed248-1 {}] regionserver.HStore(327): Store=7ac35ffc60df92b12f6723b1694ed248/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:37,155 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,155 INFO [StoreOpener-7fdf59088c34d62eb14bcc2925bef590-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fdf59088c34d62eb14bcc2925bef590 columnFamilyName cf 2024-11-21T22:04:37,155 DEBUG [StoreOpener-7fdf59088c34d62eb14bcc2925bef590-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:04:37,156 INFO [StoreOpener-7fdf59088c34d62eb14bcc2925bef590-1 {}] regionserver.HStore(327): Store=7fdf59088c34d62eb14bcc2925bef590/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:04:37,156 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,156 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,157 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,157 DEBUG 
[RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,157 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,158 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,158 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,158 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,158 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,159 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,160 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,162 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:37,162 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 7ac35ffc60df92b12f6723b1694ed248; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71952571, jitterRate=0.07217685878276825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:37,163 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:37,163 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 7ac35ffc60df92b12f6723b1694ed248: Running coprocessor pre-open hook at 1732226677151Writing region info on filesystem at 1732226677151Initializing all the Stores at 1732226677152 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1732226677152Cleaning up temporary data from old regions at 1732226677158 (+6 ms)Running coprocessor post-open hooks at 1732226677163 (+5 ms)Region opened successfully at 1732226677163 2024-11-21T22:04:37,164 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:04:37,165 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248., pid=179, masterSystemTime=1732226677148 2024-11-21T22:04:37,165 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened 7fdf59088c34d62eb14bcc2925bef590; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59203396, jitterRate=-0.11780065298080444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:04:37,165 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:37,165 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for 7fdf59088c34d62eb14bcc2925bef590: Running coprocessor pre-open hook at 1732226677151Writing region info on filesystem at 1732226677151Initializing all the Stores at 1732226677152 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226677152Cleaning up temporary data from old regions at 1732226677158 (+6 ms)Running coprocessor post-open hooks at 1732226677165 (+7 ms)Region opened successfully at 1732226677165 2024-11-21T22:04:37,167 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590., pid=178, masterSystemTime=1732226677147 2024-11-21T22:04:37,168 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:37,168 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
2024-11-21T22:04:37,169 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=7ac35ffc60df92b12f6723b1694ed248, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:04:37,171 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:37,171 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:37,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:04:37,174 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=7fdf59088c34d62eb14bcc2925bef590, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:04:37,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:04:37,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-21T22:04:37,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, server=2a884b411602,41399,1732226514979 in 179 msec 2024-11-21T22:04:37,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=176 2024-11-21T22:04:37,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, ASSIGN in 343 msec 2024-11-21T22:04:37,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126 in 185 msec 2024-11-21T22:04:37,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=175 2024-11-21T22:04:37,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, ASSIGN in 345 msec 2024-11-21T22:04:37,185 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:04:37,185 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226677185"}]},"ts":"1732226677185"} 2024-11-21T22:04:37,187 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-21T22:04:37,189 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:04:37,189 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-21T22:04:37,192 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T22:04:37,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:37,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:37,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:37,246 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:04:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:37,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,842 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T22:04:37,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 1.1860 sec 2024-11-21T22:04:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T22:04:38,822 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T22:04:38,822 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-21T22:04:38,822 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:38,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-21T22:04:38,828 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:38,828 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-21T22:04:38,828 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T22:04:38,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T22:04:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226678832 (current time:1732226678832). 
2024-11-21T22:04:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-21T22:04:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c703be8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:38,840 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:38,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:38,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:38,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b140f49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:38,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:38,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,842 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51322, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:38,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17ac8301, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:38,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:38,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:38,845 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:38,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,847 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1891541c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:38,852 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:38,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:38,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:38,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5133562d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:38,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:38,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,854 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:38,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6168dfb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:38,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:38,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:38,857 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:04:38,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:38,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:38,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44024, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:38,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:38,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:38,862 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T22:04:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T22:04:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-21T22:04:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T22:04:38,867 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:38,869 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:38,872 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:38,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742217_1393 (size=161) 2024-11-21T22:04:38,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742217_1393 (size=161) 2024-11-21T22:04:38,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742217_1393 (size=161) 2024-11-21T22:04:38,891 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:38,891 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248}] 2024-11-21T22:04:38,892 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:38,892 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T22:04:39,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-21T22:04:39,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 7ac35ffc60df92b12f6723b1694ed248: 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for 7fdf59088c34d62eb14bcc2925bef590: 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. for emptySnaptb0-testExportWithChecksum completed. 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. for emptySnaptb0-testExportWithChecksum completed. 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:39,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-21T22:04:39,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:39,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:04:39,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742218_1394 (size=68) 2024-11-21T22:04:39,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742218_1394 (size=68) 2024-11-21T22:04:39,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742218_1394 (size=68) 2024-11-21T22:04:39,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 
2024-11-21T22:04:39,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-21T22:04:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-21T22:04:39,080 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:39,081 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:39,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 in 190 msec 2024-11-21T22:04:39,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742219_1395 (size=68) 2024-11-21T22:04:39,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742219_1395 (size=68) 2024-11-21T22:04:39,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742219_1395 (size=68) 2024-11-21T22:04:39,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
2024-11-21T22:04:39,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-21T22:04:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-21T22:04:39,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:39,108 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:39,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-21T22:04:39,113 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:39,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 in 220 msec 2024-11-21T22:04:39,114 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:39,115 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:39,115 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-21T22:04:39,116 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-21T22:04:39,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742220_1396 (size=543) 2024-11-21T22:04:39,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742220_1396 (size=543) 2024-11-21T22:04:39,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742220_1396 (size=543) 2024-11-21T22:04:39,155 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:39,181 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T22:04:39,190 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:39,191 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-21T22:04:39,193 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:39,193 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-21T22:04:39,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 330 msec 2024-11-21T22:04:39,381 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-21T22:04:39,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T22:04:39,491 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T22:04:39,496 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0cea539021de3a5c4153ceb877f12a558', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590., hostname=2a884b411602,44629,1732226515126, seqNum=2] 2024-11-21T22:04:39,501 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='1d85764d105b520dc0c5d752cc7d040cd', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:39,502 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='227ce796dc24335e132cce07f728e5a7d', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:39,507 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3142129a8f43a8d2402fe3705a1cc0776', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:39,507 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4643fc0ec9905a72712db5add203ffcd2', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:04:39,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:39,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:04:39,514 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T22:04:39,519 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-21T22:04:39,519 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:04:39,520 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:04:39,522 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T22:04:39,529 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T22:04:39,538 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T22:04:39,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T22:04:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226679542 (current time:1732226679542). 
2024-11-21T22:04:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:04:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-21T22:04:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:04:39,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4032a4ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:39,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:39,544 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:39,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:39,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:39,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bec213d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:39,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:39,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,547 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51360, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:39,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f52959, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:39,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:39,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:39,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33002, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:39,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:39,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:39,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,553 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:04:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fee8181, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:04:39,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:04:39,556 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:04:39,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:04:39,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:04:39,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6591fca0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:04:39,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:04:39,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,558 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:04:39,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7898f2d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:04:39,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:04:39,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:04:39,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:39,563 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T22:04:39,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:04:39,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:04:39,568 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:04:39,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183. 2024-11-21T22:04:39,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:04:39,569 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:04:39,570 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:04:39,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T22:04:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:04:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T22:04:39,573 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:04:39,575 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:04:39,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-21T22:04:39,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T22:04:39,578 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:04:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742221_1397 (size=156) 2024-11-21T22:04:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742221_1397 (size=156) 2024-11-21T22:04:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742221_1397 (size=156) 2024-11-21T22:04:39,606 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:04:39,606 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248}] 2024-11-21T22:04:39,608 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:39,608 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T22:04:39,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-21T22:04:39,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:04:39,761 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 7ac35ffc60df92b12f6723b1694ed248 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-21T22:04:39,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-21T22:04:39,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 
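[editor's note] The SnapshotProcedure (pid=183, type=FLUSH) and its per-region SnapshotRegionProcedure children above are the master-side view of a snapshot request. A hedged sketch of how such a flush snapshot is typically requested through the Admin API follows; it is not the test's actual code, only the snapshot and table names are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table, Admin.snapshot(name, table) takes a flush snapshot,
          // matching the "type=FLUSH" snapshot description in the log above.
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }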
2024-11-21T22:04:39,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing 7fdf59088c34d62eb14bcc2925bef590 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-21T22:04:39,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/.tmp/cf/90ab1f510d994e49931b60cd5f3409ca is 71, key is 05c0a9f5e9175c41b9639217aec6475b/cf:q/1732226679508/Put/seqid=0 2024-11-21T22:04:39,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/.tmp/cf/71da5120f26b4a9491b79456bab471bc is 71, key is 1841c6fcecc3b9117202ad921bccf934/cf:q/1732226679512/Put/seqid=0 2024-11-21T22:04:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742222_1398 (size=5216) 2024-11-21T22:04:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742222_1398 (size=5216) 2024-11-21T22:04:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742222_1398 (size=5216) 2024-11-21T22:04:39,800 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/.tmp/cf/90ab1f510d994e49931b60cd5f3409ca 2024-11-21T22:04:39,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742223_1399 (size=8392) 2024-11-21T22:04:39,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742223_1399 (size=8392) 2024-11-21T22:04:39,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742223_1399 (size=8392) 2024-11-21T22:04:39,803 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/.tmp/cf/71da5120f26b4a9491b79456bab471bc 2024-11-21T22:04:39,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/.tmp/cf/90ab1f510d994e49931b60cd5f3409ca as 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca 2024-11-21T22:04:39,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/.tmp/cf/71da5120f26b4a9491b79456bab471bc as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc 2024-11-21T22:04:39,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca, entries=2, sequenceid=6, filesize=5.1 K 2024-11-21T22:04:39,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 7fdf59088c34d62eb14bcc2925bef590 in 49ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for 7fdf59088c34d62eb14bcc2925bef590: 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. for snaptb0-testExportWithChecksum completed. 2024-11-21T22:04:39,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc, entries=48, sequenceid=6, filesize=8.2 K 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca] hfiles 2024-11-21T22:04:39,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca for snapshot=snaptb0-testExportWithChecksum 2024-11-21T22:04:39,813 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7ac35ffc60df92b12f6723b1694ed248 in 52ms, sequenceid=6, compaction requested=false 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 7ac35ffc60df92b12f6723b1694ed248: 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. for snaptb0-testExportWithChecksum completed. 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc] hfiles 2024-11-21T22:04:39,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc for snapshot=snaptb0-testExportWithChecksum 2024-11-21T22:04:39,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742224_1400 (size=107) 2024-11-21T22:04:39,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742224_1400 (size=107) 2024-11-21T22:04:39,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742224_1400 (size=107) 2024-11-21T22:04:39,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 
2024-11-21T22:04:39,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-21T22:04:39,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-21T22:04:39,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:39,825 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:04:39,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7fdf59088c34d62eb14bcc2925bef590 in 220 msec 2024-11-21T22:04:39,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742225_1401 (size=107) 2024-11-21T22:04:39,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742225_1401 (size=107) 2024-11-21T22:04:39,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742225_1401 (size=107) 2024-11-21T22:04:39,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
2024-11-21T22:04:39,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-21T22:04:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-21T22:04:39,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:39,833 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:04:39,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-11-21T22:04:39,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7ac35ffc60df92b12f6723b1694ed248 in 227 msec 2024-11-21T22:04:39,839 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:04:39,840 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:04:39,840 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:04:39,840 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-21T22:04:39,841 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T22:04:39,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742226_1402 (size=621) 2024-11-21T22:04:39,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742226_1402 (size=621) 2024-11-21T22:04:39,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742226_1402 (size=621) 2024-11-21T22:04:39,855 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:04:39,861 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:04:39,862 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T22:04:39,863 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:04:39,863 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-21T22:04:39,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 292 msec 2024-11-21T22:04:39,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T22:04:39,903 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T22:04:39,903 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903 2024-11-21T22:04:39,903 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:39,948 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:04:39,948 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d27884a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T22:04:39,950 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:04:39,960 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T22:04:40,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:40,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:40,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:40,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0007_000001 (auth:SIMPLE) from 127.0.0.1:56106 2024-11-21T22:04:40,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0007/container_1732226522247_0007_01_000001/launch_container.sh] 2024-11-21T22:04:40,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0007/container_1732226522247_0007_01_000001/container_tokens] 2024-11-21T22:04:40,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0007/container_1732226522247_0007_01_000001/sysfs] 2024-11-21T22:04:41,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-5555783216675322123.jar 2024-11-21T22:04:41,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-7372851807301130355.jar 2024-11-21T22:04:41,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:04:41,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:04:41,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:04:41,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:41,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:41,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:04:41,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
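[editor's note] The long run of "For class X, using jar Y" entries above is TableMapReduceUtil resolving, for each class the export job depends on, the jar that provides it so those jars can be shipped with the MapReduce job. A hedged sketch of that dependency-shipping step follows; the Job setup is illustrative and not the ExportSnapshot job's real configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-sketch");   // placeholder job name
        // Adds the hbase-client, hbase-protocol-shaded, zookeeper, etc. jars to the
        // job's distributed cache, producing "For class X, using jar Y" lines like those above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }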
2024-11-21T22:04:41,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:04:41,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:04:41,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:04:41,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742227_1403 (size=131440) 2024-11-21T22:04:41,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742227_1403 (size=131440) 2024-11-21T22:04:41,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742227_1403 (size=131440) 2024-11-21T22:04:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T22:04:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T22:04:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T22:04:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742229_1405 (size=440656) 2024-11-21T22:04:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742229_1405 (size=440656) 2024-11-21T22:04:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742229_1405 (size=440656) 2024-11-21T22:04:41,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742230_1406 (size=1323991) 2024-11-21T22:04:41,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742230_1406 (size=1323991) 2024-11-21T22:04:41,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742230_1406 (size=1323991) 2024-11-21T22:04:41,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742231_1407 (size=903739) 2024-11-21T22:04:41,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742231_1407 (size=903739) 2024-11-21T22:04:41,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35185 is added to blk_1073742231_1407 (size=903739) 2024-11-21T22:04:41,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742232_1408 (size=8360083) 2024-11-21T22:04:41,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742232_1408 (size=8360083) 2024-11-21T22:04:41,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742232_1408 (size=8360083) 2024-11-21T22:04:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742233_1409 (size=6424742) 2024-11-21T22:04:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742233_1409 (size=6424742) 2024-11-21T22:04:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742233_1409 (size=6424742) 2024-11-21T22:04:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742234_1410 (size=1877034) 2024-11-21T22:04:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742234_1410 (size=1877034) 2024-11-21T22:04:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742234_1410 (size=1877034) 2024-11-21T22:04:41,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742235_1411 (size=77835) 2024-11-21T22:04:41,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742235_1411 (size=77835) 2024-11-21T22:04:41,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742235_1411 (size=77835) 2024-11-21T22:04:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742236_1412 (size=30949) 2024-11-21T22:04:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742236_1412 (size=30949) 2024-11-21T22:04:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742236_1412 (size=30949) 2024-11-21T22:04:41,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742237_1413 (size=1597347) 2024-11-21T22:04:41,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742237_1413 (size=1597347) 2024-11-21T22:04:41,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742237_1413 (size=1597347) 2024-11-21T22:04:41,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742238_1414 (size=4695811) 2024-11-21T22:04:41,328 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742238_1414 (size=4695811) 2024-11-21T22:04:41,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742238_1414 (size=4695811) 2024-11-21T22:04:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742239_1415 (size=232957) 2024-11-21T22:04:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742239_1415 (size=232957) 2024-11-21T22:04:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742239_1415 (size=232957) 2024-11-21T22:04:41,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742240_1416 (size=127628) 2024-11-21T22:04:41,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742240_1416 (size=127628) 2024-11-21T22:04:41,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742240_1416 (size=127628) 2024-11-21T22:04:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742241_1417 (size=20406) 2024-11-21T22:04:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742241_1417 (size=20406) 2024-11-21T22:04:41,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742241_1417 (size=20406) 2024-11-21T22:04:41,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742242_1418 (size=5175431) 2024-11-21T22:04:41,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742242_1418 (size=5175431) 2024-11-21T22:04:41,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742242_1418 (size=5175431) 2024-11-21T22:04:41,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742243_1419 (size=217634) 2024-11-21T22:04:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742243_1419 (size=217634) 2024-11-21T22:04:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742243_1419 (size=217634) 2024-11-21T22:04:41,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742244_1420 (size=1832290) 2024-11-21T22:04:41,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742244_1420 (size=1832290) 2024-11-21T22:04:41,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742244_1420 (size=1832290) 2024-11-21T22:04:41,424 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742245_1421 (size=322274) 2024-11-21T22:04:41,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742245_1421 (size=322274) 2024-11-21T22:04:41,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742245_1421 (size=322274) 2024-11-21T22:04:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742246_1422 (size=503880) 2024-11-21T22:04:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742246_1422 (size=503880) 2024-11-21T22:04:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742246_1422 (size=503880) 2024-11-21T22:04:41,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742247_1423 (size=29229) 2024-11-21T22:04:41,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742247_1423 (size=29229) 2024-11-21T22:04:41,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742247_1423 (size=29229) 2024-11-21T22:04:41,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742248_1424 (size=24096) 2024-11-21T22:04:41,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742248_1424 (size=24096) 2024-11-21T22:04:41,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742248_1424 (size=24096) 2024-11-21T22:04:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742249_1425 (size=111872) 2024-11-21T22:04:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742249_1425 (size=111872) 2024-11-21T22:04:41,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742249_1425 (size=111872) 2024-11-21T22:04:41,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742250_1426 (size=45609) 2024-11-21T22:04:41,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742250_1426 (size=45609) 2024-11-21T22:04:41,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742250_1426 (size=45609) 2024-11-21T22:04:41,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742251_1427 (size=136454) 2024-11-21T22:04:41,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742251_1427 (size=136454) 2024-11-21T22:04:41,481 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742251_1427 (size=136454) 2024-11-21T22:04:41,482 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:04:41,484 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-21T22:04:41,486 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:04:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742252_1428 (size=338) 2024-11-21T22:04:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742252_1428 (size=338) 2024-11-21T22:04:41,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742252_1428 (size=338) 2024-11-21T22:04:41,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742253_1429 (size=15) 2024-11-21T22:04:41,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742253_1429 (size=15) 2024-11-21T22:04:41,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742253_1429 (size=15) 2024-11-21T22:04:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742254_1430 (size=303776) 2024-11-21T22:04:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742254_1430 (size=303776) 2024-11-21T22:04:41,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742254_1430 (size=303776) 2024-11-21T22:04:41,534 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:04:41,534 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:04:41,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:42,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:33196 2024-11-21T22:04:44,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-21T22:04:44,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-21T22:04:44,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-21T22:04:48,829 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:56510 2024-11-21T22:04:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742255_1431 (size=349426) 2024-11-21T22:04:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742255_1431 (size=349426) 2024-11-21T22:04:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742255_1431 (size=349426) 2024-11-21T22:04:49,927 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:04:51,074 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:33208 2024-11-21T22:04:53,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Error: java.io.IOException: Checksum mismatch between hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903/archive/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T22:04:55,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f7a1444316755d092bee2d182dde7a0c, had cached 0 bytes from a total of 5422 2024-11-21T22:04:55,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c3e82354b20e9e6eb82600246f4d616, had cached 0 bytes from a total of 8190 2024-11-21T22:04:55,956 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:41898 2024-11-21T22:04:59,348 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000003/launch_container.sh] 2024-11-21T22:04:59,348 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000003/container_tokens] 2024-11-21T22:04:59,348 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000003/sysfs] 2024-11-21T22:04:59,632 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000002/launch_container.sh] 2024-11-21T22:04:59,632 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000002/container_tokens] 2024-11-21T22:04:59,632 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903/archive/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T22:05:00,970 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:41902 2024-11-21T22:05:01,214 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7c3e82354b20e9e6eb82600246f4d616 changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:05:01,214 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7ac35ffc60df92b12f6723b1694ed248 changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:05:01,214 DEBUG [master/2a884b411602:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f7a1444316755d092bee2d182dde7a0c changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:05:01,214 DEBUG [master/2a884b411602:0.Chore.1 {}] 
balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7fdf59088c34d62eb14bcc2925bef590 changed from -1.0 to 0.0, refreshing cache 2024-11-21T22:05:04,438 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000004/launch_container.sh] 2024-11-21T22:05:04,438 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000004/container_tokens] 2024-11-21T22:05:04,438 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/local-export-1732226679903/archive/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T22:05:05,980 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:51050 2024-11-21T22:05:09,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742256_1432 (size=21330) 2024-11-21T22:05:09,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742256_1432 (size=21330) 2024-11-21T22:05:09,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742256_1432 (size=21330) 2024-11-21T22:05:09,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742257_1433 (size=460) 2024-11-21T22:05:09,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742257_1433 (size=460) 2024-11-21T22:05:09,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742257_1433 (size=460) 2024-11-21T22:05:09,343 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000005/launch_container.sh] 2024-11-21T22:05:09,344 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000005/container_tokens] 2024-11-21T22:05:09,344 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_1/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000005/sysfs] 2024-11-21T22:05:09,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36723 is added to blk_1073742258_1434 (size=21330)
2024-11-21T22:05:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742258_1434 (size=21330)
2024-11-21T22:05:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742258_1434 (size=21330)
2024-11-21T22:05:09,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742259_1435 (size=349426)
2024-11-21T22:05:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742259_1435 (size=349426)
2024-11-21T22:05:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742259_1435 (size=349426)
2024-11-21T22:05:09,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:51064
2024-11-21T22:05:10,782 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732226522247_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
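The IOException reproduced three times above is why this first export attempt fails: the source is an hdfs:// path and the destination is a local file:// path, so the two filesystems compute incomparable checksums and verifyCopyResult gives up. The error text itself names the two workarounds. Below is a minimal, hypothetical sketch of applying them when driving ExportSnapshot through ToolRunner (the same entry points visible in the stack trace); the snapshot name and the option names come from this log, while the class name, the destination path, and the choice of which workaround to enable are purely illustrative.

// Hypothetical sketch, not the test's own code: re-run the export with the checksum
// workarounds suggested by the IOException logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumWorkarounds {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Workaround 1 from the error text: file-level CRCs that stay comparable across
    // different filesystem types and block sizes.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    String[] exportArgs = {
        "-snapshot", "snaptb0-testExportWithChecksum",
        // Destination is illustrative; the failing attempt above copied to a file:// path.
        "-copy-to", "file:///tmp/local-export"
        // Workaround 2 from the error text (instead of COMPOSITE_CRC): skip verification.
        // , "-no-checksum-verify"
    };
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(exitCode);
  }
}

As the NOTE in the message warns, -no-checksum-verify can mask corruption during transfer, so the COMPOSITE_CRC route is the safer of the two when the filesystems merely disagree on checksum algorithms.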
2024-11-21T22:05:10,783 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783 2024-11-21T22:05:10,783 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:10,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:10,825 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T22:05:10,828 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T22:05:10,835 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T22:05:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742260_1436 (size=621) 2024-11-21T22:05:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742261_1437 (size=156) 2024-11-21T22:05:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742261_1437 (size=156) 2024-11-21T22:05:10,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742260_1436 (size=621) 2024-11-21T22:05:10,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742260_1436 (size=621) 2024-11-21T22:05:10,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742261_1437 (size=156) 2024-11-21T22:05:10,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:10,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:10,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:11,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-1139597074793324339.jar 2024-11-21T22:05:11,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:11,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-8946713425145321981.jar 2024-11-21T22:05:12,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:12,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:05:12,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:05:12,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:05:12,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:05:12,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:05:12,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:05:12,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:05:12,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:05:12,031 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:05:12,031 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:05:12,031 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:05:12,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-21T22:05:12,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:12,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:12,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:12,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:12,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:12,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:12,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742262_1438 (size=131440) 2024-11-21T22:05:12,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742262_1438 (size=131440) 2024-11-21T22:05:12,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742262_1438 (size=131440) 2024-11-21T22:05:12,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742263_1439 (size=440656) 2024-11-21T22:05:12,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742263_1439 (size=440656) 2024-11-21T22:05:12,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742263_1439 (size=440656) 2024-11-21T22:05:12,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742264_1440 (size=4188619) 2024-11-21T22:05:12,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742264_1440 (size=4188619) 2024-11-21T22:05:12,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742264_1440 (size=4188619) 2024-11-21T22:05:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35185 is added to blk_1073742265_1441 (size=1323991) 2024-11-21T22:05:12,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742265_1441 (size=1323991) 2024-11-21T22:05:12,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742265_1441 (size=1323991) 2024-11-21T22:05:12,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742266_1442 (size=903739) 2024-11-21T22:05:12,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742266_1442 (size=903739) 2024-11-21T22:05:12,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742266_1442 (size=903739) 2024-11-21T22:05:12,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742267_1443 (size=8360083) 2024-11-21T22:05:12,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742267_1443 (size=8360083) 2024-11-21T22:05:12,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742267_1443 (size=8360083) 2024-11-21T22:05:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742268_1444 (size=6424742) 2024-11-21T22:05:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742268_1444 (size=6424742) 2024-11-21T22:05:12,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742268_1444 (size=6424742) 2024-11-21T22:05:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742269_1445 (size=1877034) 2024-11-21T22:05:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742269_1445 (size=1877034) 2024-11-21T22:05:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742269_1445 (size=1877034) 2024-11-21T22:05:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742270_1446 (size=77835) 2024-11-21T22:05:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742270_1446 (size=77835) 2024-11-21T22:05:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742270_1446 (size=77835) 2024-11-21T22:05:12,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742271_1447 (size=30949) 2024-11-21T22:05:12,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742271_1447 (size=30949) 2024-11-21T22:05:12,798 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742271_1447 (size=30949) 2024-11-21T22:05:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742272_1448 (size=1597347) 2024-11-21T22:05:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742272_1448 (size=1597347) 2024-11-21T22:05:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742272_1448 (size=1597347) 2024-11-21T22:05:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742273_1449 (size=4695811) 2024-11-21T22:05:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742273_1449 (size=4695811) 2024-11-21T22:05:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742273_1449 (size=4695811) 2024-11-21T22:05:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742274_1450 (size=232957) 2024-11-21T22:05:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742274_1450 (size=232957) 2024-11-21T22:05:12,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742274_1450 (size=232957) 2024-11-21T22:05:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742275_1451 (size=127628) 2024-11-21T22:05:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742275_1451 (size=127628) 2024-11-21T22:05:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742275_1451 (size=127628) 2024-11-21T22:05:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742276_1452 (size=20406) 2024-11-21T22:05:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742276_1452 (size=20406) 2024-11-21T22:05:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742276_1452 (size=20406) 2024-11-21T22:05:12,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742277_1453 (size=5175431) 2024-11-21T22:05:12,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742277_1453 (size=5175431) 2024-11-21T22:05:12,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742277_1453 (size=5175431) 2024-11-21T22:05:12,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742278_1454 (size=217634) 2024-11-21T22:05:12,875 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742278_1454 (size=217634) 2024-11-21T22:05:12,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742278_1454 (size=217634) 2024-11-21T22:05:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742279_1455 (size=1832290) 2024-11-21T22:05:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742279_1455 (size=1832290) 2024-11-21T22:05:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742279_1455 (size=1832290) 2024-11-21T22:05:12,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742280_1456 (size=322274) 2024-11-21T22:05:12,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742280_1456 (size=322274) 2024-11-21T22:05:12,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742280_1456 (size=322274) 2024-11-21T22:05:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742281_1457 (size=503880) 2024-11-21T22:05:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742281_1457 (size=503880) 2024-11-21T22:05:12,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742281_1457 (size=503880) 2024-11-21T22:05:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742282_1458 (size=29229) 2024-11-21T22:05:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742282_1458 (size=29229) 2024-11-21T22:05:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742282_1458 (size=29229) 2024-11-21T22:05:12,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742283_1459 (size=24096) 2024-11-21T22:05:12,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742283_1459 (size=24096) 2024-11-21T22:05:12,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742283_1459 (size=24096) 2024-11-21T22:05:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742284_1460 (size=111872) 2024-11-21T22:05:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742284_1460 (size=111872) 2024-11-21T22:05:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742284_1460 (size=111872) 2024-11-21T22:05:12,925 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742285_1461 (size=45609) 2024-11-21T22:05:12,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742285_1461 (size=45609) 2024-11-21T22:05:12,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742285_1461 (size=45609) 2024-11-21T22:05:12,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742286_1462 (size=136454) 2024-11-21T22:05:12,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742286_1462 (size=136454) 2024-11-21T22:05:12,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742286_1462 (size=136454) 2024-11-21T22:05:12,937 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:05:12,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-21T22:05:12,941 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:05:12,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742287_1463 (size=338) 2024-11-21T22:05:12,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742287_1463 (size=338) 2024-11-21T22:05:12,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742287_1463 (size=338) 2024-11-21T22:05:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742288_1464 (size=15) 2024-11-21T22:05:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742288_1464 (size=15) 2024-11-21T22:05:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742288_1464 (size=15) 2024-11-21T22:05:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742289_1465 (size=303728) 2024-11-21T22:05:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742289_1465 (size=303728) 2024-11-21T22:05:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742289_1465 (size=303728) 2024-11-21T22:05:15,629 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:05:15,629 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:05:15,633 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0008_000001 (auth:SIMPLE) from 127.0.0.1:43726 2024-11-21T22:05:15,658 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000001/launch_container.sh] 2024-11-21T22:05:15,658 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000001/container_tokens] 2024-11-21T22:05:15,658 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_2/usercache/jenkins/appcache/application_1732226522247_0008/container_1732226522247_0008_01_000001/sysfs] 2024-11-21T22:05:16,283 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0009_000001 (auth:SIMPLE) from 127.0.0.1:41214 2024-11-21T22:05:21,618 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0009_000001 (auth:SIMPLE) from 127.0.0.1:51396 2024-11-21T22:05:21,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742290_1466 (size=349378) 2024-11-21T22:05:21,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742290_1466 (size=349378) 2024-11-21T22:05:21,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742290_1466 (size=349378) 2024-11-21T22:05:22,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7ac35ffc60df92b12f6723b1694ed248, had cached 0 bytes from a total of 8392 2024-11-21T22:05:22,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7fdf59088c34d62eb14bcc2925bef590, had cached 0 bytes from a total of 5216 2024-11-21T22:05:23,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
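The pair of AbstractLeafQueue warnings just above means the test queue's maximum-am-resource-percent is too small to admit even one ApplicationMaster, so the capacity scheduler deliberately skips enforcement rather than stall the job. Nothing fails because of it, but if the warning were to be addressed rather than ignored, the relevant knob is the capacity-scheduler property sketched below; the 0.5 value and the bare Configuration are illustrative assumptions, not settings taken from this test.

// Hypothetical sketch: raising the ApplicationMaster resource fraction that the
// AbstractLeafQueue warnings above complain about. YARN's default is 0.1.
import org.apache.hadoop.conf.Configuration;

public class RaiseAmResourcePercent {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Cluster-wide limit; a per-queue override would use
    // yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent instead.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(conf.getFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.1f));
  }
}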
2024-11-21T22:05:23,802 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0009_000001 (auth:SIMPLE) from 127.0.0.1:37146 2024-11-21T22:05:27,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742291_1467 (size=8392) 2024-11-21T22:05:27,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742291_1467 (size=8392) 2024-11-21T22:05:27,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742291_1467 (size=8392) 2024-11-21T22:05:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742292_1468 (size=5216) 2024-11-21T22:05:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742292_1468 (size=5216) 2024-11-21T22:05:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742292_1468 (size=5216) 2024-11-21T22:05:27,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742293_1469 (size=17413) 2024-11-21T22:05:27,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742293_1469 (size=17413) 2024-11-21T22:05:27,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742293_1469 (size=17413) 2024-11-21T22:05:27,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742294_1470 (size=462) 2024-11-21T22:05:27,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742294_1470 (size=462) 2024-11-21T22:05:27,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742294_1470 (size=462) 2024-11-21T22:05:27,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742295_1471 (size=17413) 2024-11-21T22:05:27,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742295_1471 (size=17413) 2024-11-21T22:05:27,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742295_1471 (size=17413) 2024-11-21T22:05:27,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742296_1472 (size=349378) 2024-11-21T22:05:27,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742296_1472 (size=349378) 2024-11-21T22:05:27,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742296_1472 (size=349378) 2024-11-21T22:05:27,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0009_000001 (auth:SIMPLE) from 127.0.0.1:37154 
2024-11-21T22:05:29,315 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:05:29,316 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:05:29,332 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-21T22:05:29,332 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:05:29,332 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-21T22:05:29,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226710783/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-21T22:05:29,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-21T22:05:29,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T22:05:29,343 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226729343"}]},"ts":"1732226729343"} 2024-11-21T22:05:29,345 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-21T22:05:29,345 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-21T22:05:29,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-21T22:05:29,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, UNASSIGN}] 2024-11-21T22:05:29,349 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, UNASSIGN 2024-11-21T22:05:29,350 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, UNASSIGN 2024-11-21T22:05:29,351 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=7fdf59088c34d62eb14bcc2925bef590, regionState=CLOSING, regionLocation=2a884b411602,44629,1732226515126 2024-11-21T22:05:29,351 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=7ac35ffc60df92b12f6723b1694ed248, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:05:29,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, UNASSIGN because future has completed 2024-11-21T22:05:29,354 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:05:29,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126}] 2024-11-21T22:05:29,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, UNASSIGN because future has completed 2024-11-21T22:05:29,357 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:05:29,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, 
server=2a884b411602,41399,1732226514979}] 2024-11-21T22:05:29,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T22:05:29,509 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(122): Close 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:05:29,510 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:05:29,510 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing 7fdf59088c34d62eb14bcc2925bef590, disabling compactions & flushes 2024-11-21T22:05:29,510 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:05:29,510 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:05:29,510 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. after waiting 0 ms 2024-11-21T22:05:29,510 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:05:29,512 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:05:29,513 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:05:29,513 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing 7ac35ffc60df92b12f6723b1694ed248, disabling compactions & flushes 2024-11-21T22:05:29,513 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:05:29,513 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 2024-11-21T22:05:29,513 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. after waiting 0 ms 2024-11-21T22:05:29,513 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
2024-11-21T22:05:29,540 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:05:29,541 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:05:29,541 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590. 2024-11-21T22:05:29,541 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for 7fdf59088c34d62eb14bcc2925bef590: Waiting for close lock at 1732226729510Running coprocessor pre-close hooks at 1732226729510Disabling compacts and flushes for region at 1732226729510Disabling writes for close at 1732226729510Writing region close event to WAL at 1732226729510Running coprocessor post-close hooks at 1732226729541 (+31 ms)Closed at 1732226729541 2024-11-21T22:05:29,545 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed 7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:05:29,546 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=7fdf59088c34d62eb14bcc2925bef590, regionState=CLOSED 2024-11-21T22:05:29,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126 because future has completed 2024-11-21T22:05:29,551 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:05:29,552 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:05:29,552 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248. 
2024-11-21T22:05:29,552 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for 7ac35ffc60df92b12f6723b1694ed248: Waiting for close lock at 1732226729513Running coprocessor pre-close hooks at 1732226729513Disabling compacts and flushes for region at 1732226729513Disabling writes for close at 1732226729513Writing region close event to WAL at 1732226729547 (+34 ms)Running coprocessor post-close hooks at 1732226729552 (+5 ms)Closed at 1732226729552 2024-11-21T22:05:29,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-21T22:05:29,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure 7fdf59088c34d62eb14bcc2925bef590, server=2a884b411602,44629,1732226515126 in 196 msec 2024-11-21T22:05:29,554 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed 7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:05:29,556 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=7ac35ffc60df92b12f6723b1694ed248, regionState=CLOSED 2024-11-21T22:05:29,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7fdf59088c34d62eb14bcc2925bef590, UNASSIGN in 207 msec 2024-11-21T22:05:29,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:05:29,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-21T22:05:29,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 7ac35ffc60df92b12f6723b1694ed248, server=2a884b411602,41399,1732226514979 in 203 msec 2024-11-21T22:05:29,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=187 2024-11-21T22:05:29,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7ac35ffc60df92b12f6723b1694ed248, UNASSIGN in 214 msec 2024-11-21T22:05:29,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-21T22:05:29,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 219 msec 2024-11-21T22:05:29,568 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226729568"}]},"ts":"1732226729568"} 2024-11-21T22:05:29,570 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-21T22:05:29,570 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 
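
At this point the table has been marked DISABLED in hbase:meta. Before the delete request that follows, a client could confirm that state explicitly; a small sketch assuming a synchronous Connection/Admin pair (the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CheckDisabledSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // DeleteTableProcedure requires the table to be disabled first.
          if (!admin.isTableDisabled(table)) {
            throw new IllegalStateException(table + " is not disabled yet");
          }
        }
      }
    }
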
2024-11-21T22:05:29,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 231 msec 2024-11-21T22:05:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T22:05:29,661 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T22:05:29,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-21T22:05:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,664 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-21T22:05:29,666 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,671 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-21T22:05:29,678 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:05:29,678 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:05:29,680 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/recovered.edits] 2024-11-21T22:05:29,680 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/recovered.edits] 2024-11-21T22:05:29,687 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/cf/90ab1f510d994e49931b60cd5f3409ca 2024-11-21T22:05:29,687 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/cf/71da5120f26b4a9491b79456bab471bc 2024-11-21T22:05:29,691 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248/recovered.edits/9.seqid 2024-11-21T22:05:29,691 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590/recovered.edits/9.seqid 2024-11-21T22:05:29,692 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7fdf59088c34d62eb14bcc2925bef590 2024-11-21T22:05:29,692 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportWithChecksum/7ac35ffc60df92b12f6723b1694ed248 2024-11-21T22:05:29,692 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-21T22:05:29,695 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,698 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-21T22:05:29,701 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-21T22:05:29,702 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,702 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
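
The archiving above is entirely server side: DeleteTableProcedure has the HFileArchiver move each region's store files and recovered.edits under the archive/ directory before deleting the region directories and the meta rows. From the client, the whole sequence is triggered by one delete call; a hedged sketch, assuming an Admin handle like the one in the earlier sketches:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class DeleteTableSketch {
      /** Drops a disabled table; the master archives its HFiles as logged above. */
      static void dropTable(Admin admin, String name) throws IOException {
        admin.deleteTable(TableName.valueOf(name));
      }
    }
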
2024-11-21T22:05:29,703 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226729702"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:29,703 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226729702"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:29,708 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:05:29,708 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7fdf59088c34d62eb14bcc2925bef590, NAME => 'testtb-testExportWithChecksum,,1732226676655.7fdf59088c34d62eb14bcc2925bef590.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7ac35ffc60df92b12f6723b1694ed248, NAME => 'testtb-testExportWithChecksum,1,1732226676655.7ac35ffc60df92b12f6723b1694ed248.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:05:29,708 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-11-21T22:05:29,708 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226729708"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:29,712 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-21T22:05:29,713 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T22:05:29,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,715 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 51 msec 2024-11-21T22:05:29,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T22:05:29,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithChecksum with data PBUF 2024-11-21T22:05:29,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T22:05:29,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T22:05:29,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:29,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:29,723 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,723 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:29,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T22:05:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:29,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-21T22:05:29,725 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-21T22:05:29,725 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T22:05:29,727 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:29,727 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:29,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:29,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:29,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-21T22:05:29,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-21T22:05:29,742 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-21T22:05:29,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-21T22:05:29,776 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=805 (was 806), OpenFileDescriptor=806 (was 818), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=551 (was 578), ProcessCount=17 (was 17), AvailableMemoryMB=2374 (was 2171) - AvailableMemoryMB LEAK? 
- 2024-11-21T22:05:29,777 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-21T22:05:29,795 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=551, ProcessCount=17, AvailableMemoryMB=2373 2024-11-21T22:05:29,795 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-21T22:05:29,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T22:05:29,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:29,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T22:05:29,799 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:05:29,799 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-21T22:05:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T22:05:29,800 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T22:05:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742297_1473 (size=418) 2024-11-21T22:05:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742297_1473 (size=418) 2024-11-21T22:05:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742297_1473 (size=418) 2024-11-21T22:05:29,812 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3f2cd4b98a32e900cdb76a3025b3b500, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:29,812 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f61de7dd7ba8c7a2616adffdda464926, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742298_1474 (size=79) 2024-11-21T22:05:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742298_1474 (size=79) 2024-11-21T22:05:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742298_1474 (size=79) 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 3f2cd4b98a32e900cdb76a3025b3b500, disabling compactions & flushes 2024-11-21T22:05:29,839 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. after waiting 0 ms 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
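
The CreateTableProcedure above is materialising the descriptor passed in the create request at 22:05:29,797: one family 'cf' with a single version, REGION_REPLICATION 1, the DEFAULT store file tracker, and a single split point '1' that yields the two regions 3f2cd4b98a32e900cdb76a3025b3b500 and f61de7dd7ba8c7a2616adffdda464926. A client-side sketch that would produce an equivalent request, with the caveat that most of the attributes printed in the log are simply the builder defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      static void createTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setRegionReplication(1)
            // The logged descriptor carries 'hbase.store.file-tracker.impl' => 'DEFAULT';
            // DefaultStoreFileTracker is also what StoreFileTrackerFactory picks by default.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        // One split key '1' -> two regions: [, 1) and [1, ), as added to hbase:meta above.
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }
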
2024-11-21T22:05:29,839 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:29,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3f2cd4b98a32e900cdb76a3025b3b500: Waiting for close lock at 1732226729839Disabling compacts and flushes for region at 1732226729839Disabling writes for close at 1732226729839Writing region close event to WAL at 1732226729839Closed at 1732226729839 2024-11-21T22:05:29,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742299_1475 (size=79) 2024-11-21T22:05:29,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742299_1475 (size=79) 2024-11-21T22:05:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742299_1475 (size=79) 2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing f61de7dd7ba8c7a2616adffdda464926, disabling compactions & flushes 2024-11-21T22:05:29,858 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. after waiting 0 ms 2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:29,858 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 
2024-11-21T22:05:29,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for f61de7dd7ba8c7a2616adffdda464926: Waiting for close lock at 1732226729858Disabling compacts and flushes for region at 1732226729858Disabling writes for close at 1732226729858Writing region close event to WAL at 1732226729858Closed at 1732226729858 2024-11-21T22:05:29,860 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T22:05:29,860 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732226729860"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226729860"}]},"ts":"1732226729860"} 2024-11-21T22:05:29,860 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732226729860"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732226729860"}]},"ts":"1732226729860"} 2024-11-21T22:05:29,863 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T22:05:29,865 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T22:05:29,865 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226729865"}]},"ts":"1732226729865"} 2024-11-21T22:05:29,867 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-21T22:05:29,868 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {2a884b411602=0} racks are {/default-rack=0} 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T22:05:29,869 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T22:05:29,869 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T22:05:29,869 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T22:05:29,869 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T22:05:29,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, ASSIGN}] 2024-11-21T22:05:29,870 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, ASSIGN 2024-11-21T22:05:29,871 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, ASSIGN 2024-11-21T22:05:29,876 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, ASSIGN; state=OFFLINE, location=2a884b411602,41399,1732226514979; forceNewPlan=false, retain=false 2024-11-21T22:05:29,876 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, ASSIGN; state=OFFLINE, location=2a884b411602,36419,1732226515183; forceNewPlan=false, retain=false 2024-11-21T22:05:29,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T22:05:30,027 INFO [2a884b411602:42183 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
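
Once the balancer has produced a plan (above: 2 regions assigned across the mini cluster's region servers), the resulting locations can be inspected from the client with the standard RegionLocator; a short sketch, assuming an open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLocationsSketch {
      static void printLocations(Connection conn) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // e.g. 3f2cd4b98a32e900cdb76a3025b3b500 -> 2a884b411602,41399,1732226514979
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
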
2024-11-21T22:05:30,027 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=f61de7dd7ba8c7a2616adffdda464926, regionState=OPENING, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:05:30,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=3f2cd4b98a32e900cdb76a3025b3b500, regionState=OPENING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:05:30,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, ASSIGN because future has completed 2024-11-21T22:05:30,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:05:30,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, ASSIGN because future has completed 2024-11-21T22:05:30,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:05:30,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T22:05:30,184 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:30,184 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => f61de7dd7ba8c7a2616adffdda464926, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T22:05:30,184 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. service=AccessControlService 2024-11-21T22:05:30,184 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T22:05:30,185 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,185 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:05:30,185 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,185 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,186 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f2cd4b98a32e900cdb76a3025b3b500, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. service=AccessControlService 2024-11-21T22:05:30,186 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
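
Both region opens register the AccessControlService coprocessor endpoint because this test cluster runs with the AccessController loaded. Outside the test harness that behaviour comes from configuration; a sketch of the usual authorization-related keys expressed against a Configuration object (key names are the standard documented ones, and a real secure deployment would need additional authentication settings beyond this):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SecureClusterConfSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.setBoolean("hbase.security.authorization", true);
        // Loading the AccessController on master, region servers and every region is
        // what makes "Registered coprocessor service ... service=AccessControlService"
        // appear when a region opens, as in the entries above.
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        return conf;
      }
    }
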
2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,186 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,187 INFO [StoreOpener-f61de7dd7ba8c7a2616adffdda464926-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,187 INFO [StoreOpener-3f2cd4b98a32e900cdb76a3025b3b500-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,188 INFO [StoreOpener-f61de7dd7ba8c7a2616adffdda464926-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f61de7dd7ba8c7a2616adffdda464926 columnFamilyName cf 2024-11-21T22:05:30,189 DEBUG [StoreOpener-f61de7dd7ba8c7a2616adffdda464926-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:05:30,189 INFO [StoreOpener-f61de7dd7ba8c7a2616adffdda464926-1 {}] regionserver.HStore(327): Store=f61de7dd7ba8c7a2616adffdda464926/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:05:30,189 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,190 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,190 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,190 INFO [StoreOpener-3f2cd4b98a32e900cdb76a3025b3b500-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f2cd4b98a32e900cdb76a3025b3b500 columnFamilyName cf 2024-11-21T22:05:30,190 DEBUG [StoreOpener-3f2cd4b98a32e900cdb76a3025b3b500-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T22:05:30,191 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,191 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,191 INFO [StoreOpener-3f2cd4b98a32e900cdb76a3025b3b500-1 {}] regionserver.HStore(327): Store=3f2cd4b98a32e900cdb76a3025b3b500/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T22:05:30,191 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,192 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,192 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,192 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,192 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for 
3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,193 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,194 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,196 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:05:30,196 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T22:05:30,196 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened f61de7dd7ba8c7a2616adffdda464926; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66157792, jitterRate=-0.014172077178955078}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:05:30,196 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,197 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for f61de7dd7ba8c7a2616adffdda464926: Running coprocessor pre-open hook at 1732226730185Writing region info on filesystem at 1732226730185Initializing all the Stores at 1732226730187 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226730187Cleaning up temporary data from old regions at 1732226730191 (+4 ms)Running coprocessor post-open hooks at 1732226730196 (+5 ms)Region opened successfully at 1732226730197 (+1 ms) 2024-11-21T22:05:30,198 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened 3f2cd4b98a32e900cdb76a3025b3b500; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63845495, jitterRate=-0.04862798750400543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T22:05:30,198 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,198 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for 
3f2cd4b98a32e900cdb76a3025b3b500: Running coprocessor pre-open hook at 1732226730186Writing region info on filesystem at 1732226730186Initializing all the Stores at 1732226730187 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732226730187Cleaning up temporary data from old regions at 1732226730192 (+5 ms)Running coprocessor post-open hooks at 1732226730198 (+6 ms)Region opened successfully at 1732226730198 2024-11-21T22:05:30,198 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., pid=196, masterSystemTime=1732226730180 2024-11-21T22:05:30,198 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500., pid=197, masterSystemTime=1732226730181 2024-11-21T22:05:30,200 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:30,200 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:30,200 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=f61de7dd7ba8c7a2616adffdda464926, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:05:30,201 DEBUG [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:30,201 INFO [RS_OPEN_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
2024-11-21T22:05:30,202 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=3f2cd4b98a32e900cdb76a3025b3b500, regionState=OPEN, openSeqNum=2, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:05:30,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:05:30,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:05:30,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-11-21T22:05:30,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183 in 176 msec 2024-11-21T22:05:30,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=194 2024-11-21T22:05:30,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, ASSIGN in 338 msec 2024-11-21T22:05:30,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979 in 176 msec 2024-11-21T22:05:30,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=194, resume processing ppid=193 2024-11-21T22:05:30,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, ASSIGN in 339 msec 2024-11-21T22:05:30,213 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T22:05:30,213 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226730213"}]},"ts":"1732226730213"} 2024-11-21T22:05:30,215 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-21T22:05:30,216 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T22:05:30,216 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-21T22:05:30,219 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T22:05:30,248 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:30,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:30,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:30,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:30,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
2024-11-21T22:05:30,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 459 msec 2024-11-21T22:05:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T22:05:30,431 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T22:05:30,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-21T22:05:30,432 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:05:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-21T22:05:30,435 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:05:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-21T22:05:30,436 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T22:05:30,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T22:05:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226730438 (current time:1732226730438). 
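The records above show CreateTableProcedure pid=193 finishing for testtb-testExportFileSystemStateWithSkipTmp, with two regions (start keys '' and '1') and a single 'cf' family whose descriptor (VERSIONS => '1', BLOOMFILTER => 'ROW', no compression) was logged at region open. As a rough, hedged illustration only, a client could create an equivalent table through the public Admin API as sketched below; the class name and configuration source are assumptions, not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical sketch: create a table shaped like the one in the log.
    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
                // Single 'cf' family matching the descriptor logged at region open:
                // VERSIONS => '1', BLOOMFILTER => 'ROW'.
                ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("cf"))
                        .setMaxVersions(1)
                        .setBloomFilterType(BloomType.ROW);
                // One split key ("1") yields the two regions seen in the log:
                // testtb-...,, and testtb-...,1,
                byte[][] splits = new byte[][] { Bytes.toBytes("1") };
                admin.createTable(TableDescriptorBuilder.newBuilder(tn)
                        .setColumnFamily(cf.build())
                        .build(), splits);
            }
        }
    }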
2024-11-21T22:05:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:05:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-21T22:05:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:05:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a455c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:05:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:05:30,440 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:05:30,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:05:30,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:05:30,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5692d26f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:05:30,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:05:30,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,441 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:05:30,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3d0bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:05:30,443 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:05:30,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,444 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60590, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,445 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:05:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:05:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,445 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T22:05:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a56739e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:05:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:05:30,446 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:05:30,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:05:30,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:05:30,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68d94dc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:05:30,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:05:30,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,447 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47778, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:05:30,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@357f704a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:05:30,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:05:30,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 2024-11-21T22:05:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:05:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,453 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:05:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T22:05:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T22:05:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T22:05:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-21T22:05:30,456 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:05:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T22:05:30,457 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:05:30,459 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:05:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742300_1476 (size=203) 2024-11-21T22:05:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742300_1476 (size=203) 2024-11-21T22:05:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742300_1476 (size=203) 2024-11-21T22:05:30,469 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:05:30,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500}, {pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926}] 2024-11-21T22:05:30,470 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,470 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T22:05:30,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-21T22:05:30,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for 3f2cd4b98a32e900cdb76a3025b3b500: 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for f61de7dd7ba8c7a2616adffdda464926: 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 
2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:05:30,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:05:30,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:05:30,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T22:05:30,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742302_1478 (size=82) 2024-11-21T22:05:30,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742302_1478 (size=82) 2024-11-21T22:05:30,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742302_1478 (size=82) 2024-11-21T22:05:30,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
2024-11-21T22:05:30,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-21T22:05:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-21T22:05:30,629 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,629 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 in 161 msec 2024-11-21T22:05:30,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742301_1477 (size=82) 2024-11-21T22:05:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742301_1477 (size=82) 2024-11-21T22:05:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742301_1477 (size=82) 2024-11-21T22:05:30,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 
2024-11-21T22:05:30,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-21T22:05:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-21T22:05:30,637 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,637 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=200, resume processing ppid=198 2024-11-21T22:05:30,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 in 169 msec 2024-11-21T22:05:30,639 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:05:30,640 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:05:30,640 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:05:30,640 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,641 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742303_1479 (size=585) 2024-11-21T22:05:30,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742303_1479 (size=585) 2024-11-21T22:05:30,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742303_1479 (size=585) 2024-11-21T22:05:30,652 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:05:30,656 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:05:30,657 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,658 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:05:30,658 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-21T22:05:30,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 204 msec 2024-11-21T22:05:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T22:05:30,772 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T22:05:30,775 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='06c8d1be2ad2cd531e3a9ed8a457cde8b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500., hostname=2a884b411602,41399,1732226514979, seqNum=2] 2024-11-21T22:05:30,776 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1425c0527f22291d1e8c3769147ab8185', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,777 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='26def718ef6c396f31f83d02e7914b10d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., hostname=2a884b411602,36419,1732226515183, seqNum=2] 
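At this point the FLUSH-type snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp has been driven through SnapshotProcedure pid=198 (SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION) and the admin client has reported the SNAPSHOT operation complete. A minimal hedged sketch of issuing such a request from the client side, assuming the standard Admin API rather than the test's own helpers:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Hypothetical sketch: request the FLUSH snapshot seen in the log above.
    public class TakeSnapshotSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Matches the '{ ss=... type=FLUSH ttl=0 }' request logged by MasterRpcServices.
                admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
                        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
                        SnapshotType.FLUSH);
                // Confirm the snapshot was registered.
                admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
            }
        }
    }

The test name suggests the completed snapshot is later copied with the ExportSnapshot tool (org.apache.hadoop.hbase.snapshot.ExportSnapshot), with the "skip tmp" variant presumably bypassing the intermediate .tmp staging directory; that export step is not shown in this part of the log.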
2024-11-21T22:05:30,778 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='3bfd6b3465999bab6c6d7ca2c6897b503', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,779 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='494e1f76645461d391d17e1fcbe9dd72b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,779 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='57a29e9e380a006a64ba4fba15c567e03', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41399 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:05:30,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36419 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T22:05:30,783 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T22:05:30,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:30,785 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
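The HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") correspond to the test loading rows before taking snaptb0. As a hedged illustration, a client write skips the WAL by setting SKIP_WAL durability on the Put; the row key and value below are placeholders, only the 'cf:q' column matches the qualifier seen later in the flush log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical sketch: a Put that bypasses the WAL, triggering the warning above.
    public class SkipWalPutSketch {
        public static void main(String[] args) throws Exception {
            TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(tn)) {
                Put put = new Put(Bytes.toBytes("example-row"))              // placeholder row key
                        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // 'cf:q' as in the log
                                Bytes.toBytes("value"));                     // placeholder value
                put.setDurability(Durability.SKIP_WAL);
                table.put(put);
            }
        }
    }

The FLUSH snapshot that follows forces these in-memory writes out to HFiles, which is the memstore flush recorded later in the log.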
2024-11-21T22:05:30,785 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T22:05:30,787 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T22:05:30,792 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T22:05:30,797 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T22:05:30,800 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732226730800 (current time:1732226730800). 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e1e7857, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:05:30,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:05:30,801 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:05:30,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:05:30,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:05:30,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1272fd23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:05:30,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:05:30,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,802 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47792, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:05:30,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300905ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:05:30,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:05:30,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,805 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 
2024-11-21T22:05:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:05:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,806 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:05:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b85b5d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ClusterIdFetcher(90): Going to request 2a884b411602,42183,-1 for getting cluster id 2024-11-21T22:05:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T22:05:30,807 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a66a9a66-f6cf-4476-80e0-53486503c165' 2024-11-21T22:05:30,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T22:05:30,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a66a9a66-f6cf-4476-80e0-53486503c165" 2024-11-21T22:05:30,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53af1f09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [2a884b411602,42183,-1] 2024-11-21T22:05:30,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T22:05:30,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,808 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T22:05:30,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dbbd14f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T22:05:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T22:05:30,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2a884b411602,41399,1732226514979, seqNum=-1] 2024-11-21T22:05:30,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,811 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., hostname=2a884b411602,36419,1732226515183, seqNum=2] 2024-11-21T22:05:30,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T22:05:30,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T22:05:30,815 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183. 
2024-11-21T22:05:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor246.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T22:05:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:05:30,815 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T22:05:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T22:05:30,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-21T22:05:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T22:05:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-21T22:05:30,817 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T22:05:30,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T22:05:30,818 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T22:05:30,821 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T22:05:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742304_1480 (size=198) 2024-11-21T22:05:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742304_1480 (size=198) 2024-11-21T22:05:30,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742304_1480 (size=198) 2024-11-21T22:05:30,828 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T22:05:30,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926}] 2024-11-21T22:05:30,828 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:30,828 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:30,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T22:05:30,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36419 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-21T22:05:30,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41399 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-21T22:05:30,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:30,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:30,981 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing 3f2cd4b98a32e900cdb76a3025b3b500 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-21T22:05:30,981 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing f61de7dd7ba8c7a2616adffdda464926 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-21T22:05:30,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/.tmp/cf/7f64cd1ef1f14c2790ec7e52f494812b is 71, key is 29acc9fbbb267810a37dbdd7e2cb8968/cf:q/1732226730782/Put/seqid=0 2024-11-21T22:05:31,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/.tmp/cf/3657fe3fba9f429f9596dcf4d22808c8 is 71, key is 0f605111a2f74f177eef84a71d2848bc/cf:q/1732226730780/Put/seqid=0 2024-11-21T22:05:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742305_1481 (size=8394) 2024-11-21T22:05:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742305_1481 (size=8394) 2024-11-21T22:05:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742305_1481 (size=8394) 2024-11-21T22:05:31,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/.tmp/cf/7f64cd1ef1f14c2790ec7e52f494812b 2024-11-21T22:05:31,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742306_1482 (size=5216) 2024-11-21T22:05:31,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742306_1482 (size=5216) 2024-11-21T22:05:31,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742306_1482 (size=5216) 2024-11-21T22:05:31,007 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/.tmp/cf/3657fe3fba9f429f9596dcf4d22808c8 2024-11-21T22:05:31,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/.tmp/cf/7f64cd1ef1f14c2790ec7e52f494812b as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b 2024-11-21T22:05:31,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/.tmp/cf/3657fe3fba9f429f9596dcf4d22808c8 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8 2024-11-21T22:05:31,018 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b, entries=48, sequenceid=6, filesize=8.2 K 2024-11-21T22:05:31,018 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8, entries=2, sequenceid=6, filesize=5.1 K 2024-11-21T22:05:31,019 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f61de7dd7ba8c7a2616adffdda464926 in 39ms, sequenceid=6, 
compaction requested=false 2024-11-21T22:05:31,019 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 3f2cd4b98a32e900cdb76a3025b3b500 in 39ms, sequenceid=6, compaction requested=false 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for f61de7dd7ba8c7a2616adffdda464926: 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 3f2cd4b98a32e900cdb76a3025b3b500: 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T22:05:31,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b] hfiles 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8] hfiles 2024-11-21T22:05:31,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742308_1484 (size=121) 2024-11-21T22:05:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742308_1484 (size=121) 2024-11-21T22:05:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742307_1483 (size=121) 2024-11-21T22:05:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742308_1484 (size=121) 2024-11-21T22:05:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742307_1483 (size=121) 2024-11-21T22:05:31,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 
2024-11-21T22:05:31,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-21T22:05:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742307_1483 (size=121) 2024-11-21T22:05:31,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-21T22:05:31,034 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:31,034 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:31,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:31,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/2a884b411602:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-21T22:05:31,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-21T22:05:31,034 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:31,035 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:31,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f61de7dd7ba8c7a2616adffdda464926 in 206 msec 2024-11-21T22:05:31,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=201 2024-11-21T22:05:31,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500 in 208 msec 2024-11-21T22:05:31,037 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T22:05:31,038 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T22:05:31,038 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T22:05:31,038 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,039 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742309_1485 (size=663) 2024-11-21T22:05:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742309_1485 (size=663) 2024-11-21T22:05:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742309_1485 (size=663) 2024-11-21T22:05:31,050 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T22:05:31,054 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T22:05:31,055 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,056 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T22:05:31,056 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-21T22:05:31,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 240 msec 2024-11-21T22:05:31,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T22:05:31,132 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T22:05:31,132 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132 2024-11-21T22:05:31,132 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:40237, tgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132, rawTgtDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132, srcFsUri=hdfs://localhost:40237, srcDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:31,157 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:40237, inputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90 2024-11-21T22:05:31,157 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
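The ExportSnapshot lines above (inputRoot, outputRoot, skipTmp=true) describe a MapReduce-driven copy of the snapshot manifest and its hfiles to the destination filesystem. A hedged sketch of driving the same tool programmatically follows; -snapshot and -copy-to are the tool's documented options, while the destination URI is a placeholder and the snapshot.export.skip.tmp key is an assumption that should be checked against the ExportSnapshot documentation for the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed config key: write directly to the final snapshot directory in
        // the target, mirroring skipTmp=true in the log above.
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            // Placeholder destination; the test exported under an HDFS export-test path.
            "-copy-to", "hdfs://namenode:8020/backups/export-1732226731132"
        });
        System.exit(rc);
      }
    }

From the command line the same run is usually expressed as: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <snapshot-name> -copy-to <hdfs-uri>.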
2024-11-21T22:05:31,162 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:31,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742311_1487 (size=663) 2024-11-21T22:05:31,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742311_1487 (size=663) 2024-11-21T22:05:31,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742311_1487 (size=663) 2024-11-21T22:05:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742310_1486 (size=198) 2024-11-21T22:05:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742310_1486 (size=198) 2024-11-21T22:05:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742310_1486 (size=198) 2024-11-21T22:05:31,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:31,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:31,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-13679159258434420738.jar 2024-11-21T22:05:32,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop-10065248099194637377.jar 
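The run of "For class ..., using jar ..." lines that follows is TableMapReduceUtil resolving, for each class the export job needs, the jar that provides it so those jars can be shipped with the MapReduce job. ExportSnapshot performs this step internally; a custom HBase MapReduce job would typically request the same behaviour as in the sketch below (the job name is illustrative, and the job is only configured, not submitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "custom-hbase-job"); // illustrative name
        // Locates the jar for each HBase/ZooKeeper/Hadoop class the job depends on
        // and adds it to the job's distributed classpath -- the step that produces
        // the "For class ..., using jar ..." lines in this log.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }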
2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T22:05:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T22:05:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T22:05:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T22:05:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T22:05:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T22:05:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T22:05:32,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:32,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:32,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:32,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:32,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T22:05:32,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:32,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T22:05:32,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742312_1488 (size=131440) 2024-11-21T22:05:32,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742312_1488 (size=131440) 2024-11-21T22:05:32,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742312_1488 (size=131440) 2024-11-21T22:05:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T22:05:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T22:05:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T22:05:32,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T22:05:32,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T22:05:32,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T22:05:32,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742315_1491 (size=903739) 2024-11-21T22:05:32,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742315_1491 (size=903739) 2024-11-21T22:05:32,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742315_1491 (size=903739) 2024-11-21T22:05:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T22:05:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T22:05:32,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T22:05:32,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742317_1493 (size=1877034) 2024-11-21T22:05:32,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742317_1493 (size=1877034) 2024-11-21T22:05:32,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742317_1493 (size=1877034) 2024-11-21T22:05:32,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742318_1494 (size=6424742) 2024-11-21T22:05:32,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742318_1494 (size=6424742) 2024-11-21T22:05:32,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742318_1494 (size=6424742) 2024-11-21T22:05:32,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742319_1495 (size=77835) 2024-11-21T22:05:32,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742319_1495 (size=77835) 2024-11-21T22:05:32,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742319_1495 (size=77835) 2024-11-21T22:05:32,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742320_1496 (size=30949) 2024-11-21T22:05:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742320_1496 (size=30949) 2024-11-21T22:05:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742320_1496 (size=30949) 2024-11-21T22:05:32,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742321_1497 (size=1597347) 2024-11-21T22:05:32,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742321_1497 (size=1597347) 2024-11-21T22:05:32,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742321_1497 (size=1597347) 2024-11-21T22:05:32,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T22:05:32,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T22:05:32,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T22:05:32,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742323_1499 (size=232957) 2024-11-21T22:05:32,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742323_1499 (size=232957) 2024-11-21T22:05:32,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742323_1499 (size=232957) 2024-11-21T22:05:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742324_1500 (size=127628) 2024-11-21T22:05:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742324_1500 (size=127628) 2024-11-21T22:05:32,400 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742324_1500 (size=127628) 2024-11-21T22:05:32,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742325_1501 (size=20406) 2024-11-21T22:05:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742325_1501 (size=20406) 2024-11-21T22:05:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742325_1501 (size=20406) 2024-11-21T22:05:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742326_1502 (size=5175431) 2024-11-21T22:05:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742326_1502 (size=5175431) 2024-11-21T22:05:32,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742326_1502 (size=5175431) 2024-11-21T22:05:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742327_1503 (size=217634) 2024-11-21T22:05:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742327_1503 (size=217634) 2024-11-21T22:05:32,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742327_1503 (size=217634) 2024-11-21T22:05:32,452 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000002/launch_container.sh] 2024-11-21T22:05:32,452 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000002/container_tokens] 2024-11-21T22:05:32,452 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_2/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000002/sysfs] 2024-11-21T22:05:32,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742328_1504 (size=1832290) 2024-11-21T22:05:32,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742328_1504 (size=1832290) 2024-11-21T22:05:32,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742328_1504 (size=1832290) 2024-11-21T22:05:32,485 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742329_1505 (size=322274) 2024-11-21T22:05:32,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742329_1505 (size=322274) 2024-11-21T22:05:32,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742329_1505 (size=322274) 2024-11-21T22:05:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742330_1506 (size=503880) 2024-11-21T22:05:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742330_1506 (size=503880) 2024-11-21T22:05:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742330_1506 (size=503880) 2024-11-21T22:05:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742331_1507 (size=29229) 2024-11-21T22:05:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742331_1507 (size=29229) 2024-11-21T22:05:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742331_1507 (size=29229) 2024-11-21T22:05:32,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742332_1508 (size=24096) 2024-11-21T22:05:32,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742332_1508 (size=24096) 2024-11-21T22:05:32,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742332_1508 (size=24096) 2024-11-21T22:05:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742333_1509 (size=111872) 2024-11-21T22:05:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742333_1509 (size=111872) 2024-11-21T22:05:32,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742333_1509 (size=111872) 2024-11-21T22:05:32,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742334_1510 (size=440656) 2024-11-21T22:05:32,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742334_1510 (size=440656) 2024-11-21T22:05:32,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742334_1510 (size=440656) 2024-11-21T22:05:32,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742335_1511 (size=45609) 2024-11-21T22:05:32,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742335_1511 (size=45609) 2024-11-21T22:05:32,541 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742335_1511 (size=45609) 2024-11-21T22:05:32,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742336_1512 (size=136454) 2024-11-21T22:05:32,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742336_1512 (size=136454) 2024-11-21T22:05:32,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742336_1512 (size=136454) 2024-11-21T22:05:32,548 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T22:05:32,550 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-21T22:05:32,552 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T22:05:32,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742337_1513 (size=366) 2024-11-21T22:05:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742337_1513 (size=366) 2024-11-21T22:05:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742337_1513 (size=366) 2024-11-21T22:05:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742338_1514 (size=15) 2024-11-21T22:05:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742338_1514 (size=15) 2024-11-21T22:05:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742338_1514 (size=15) 2024-11-21T22:05:32,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742339_1515 (size=303906) 2024-11-21T22:05:32,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742339_1515 (size=303906) 2024-11-21T22:05:32,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742339_1515 (size=303906) 2024-11-21T22:05:33,493 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T22:05:33,493 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T22:05:33,495 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0009_000001 (auth:SIMPLE) from 127.0.0.1:39024 2024-11-21T22:05:33,507 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000001/launch_container.sh] 2024-11-21T22:05:33,507 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000001/container_tokens] 2024-11-21T22:05:33,507 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_3/usercache/jenkins/appcache/application_1732226522247_0009/container_1732226522247_0009_01_000001/sysfs] 2024-11-21T22:05:33,643 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0010_000001 (auth:SIMPLE) from 127.0.0.1:59290 2024-11-21T22:05:34,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:34,423 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-21T22:05:34,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-21T22:05:35,072 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:05:39,000 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0010_000001 (auth:SIMPLE) from 127.0.0.1:43146 2024-11-21T22:05:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742340_1516 (size=349580) 2024-11-21T22:05:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742340_1516 (size=349580) 2024-11-21T22:05:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742340_1516 (size=349580) 2024-11-21T22:05:40,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f7a1444316755d092bee2d182dde7a0c, had cached 0 bytes from a total of 5422 2024-11-21T22:05:40,281 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c3e82354b20e9e6eb82600246f4d616, had cached 0 bytes from a total of 8190 2024-11-21T22:05:41,196 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0010_000001 (auth:SIMPLE) from 127.0.0.1:39026 2024-11-21T22:05:44,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742341_1517 (size=8394) 2024-11-21T22:05:44,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742341_1517 (size=8394) 2024-11-21T22:05:44,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742341_1517 (size=8394) 2024-11-21T22:05:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742342_1518 (size=5216) 2024-11-21T22:05:44,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742342_1518 (size=5216) 2024-11-21T22:05:44,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742342_1518 (size=5216) 2024-11-21T22:05:44,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742343_1519 (size=17455) 2024-11-21T22:05:44,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742343_1519 (size=17455) 2024-11-21T22:05:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742343_1519 (size=17455) 2024-11-21T22:05:44,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742344_1520 (size=476) 2024-11-21T22:05:44,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742344_1520 (size=476) 2024-11-21T22:05:44,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742344_1520 (size=476) 2024-11-21T22:05:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742345_1521 (size=17455) 2024-11-21T22:05:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742345_1521 (size=17455) 2024-11-21T22:05:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742345_1521 (size=17455) 2024-11-21T22:05:44,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742346_1522 (size=349580) 2024-11-21T22:05:44,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742346_1522 (size=349580) 2024-11-21T22:05:44,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742346_1522 (size=349580) 
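Once the export job's outputs are committed (the Finalize / Verify / Export Completed lines below), the test tears the table down; the "disable testtb-testExportFileSystemStateWithSkipTmp" request and DisableTableProcedure pid=204 visible further down correspond to a client call along the following lines. This is a hedged sketch of typical post-export cleanup, with the connection setup assumed as before; only the disable step is actually visible in this excerpt:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // Disable first (the DisableTableProcedure seen below); dropping the table
          // and the source snapshot are typical follow-ups once they are no longer
          // needed.
          admin.disableTable(table);
          admin.deleteTable(table);
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
        }
      }
    }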
2024-11-21T22:05:44,743 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0010_000001 (auth:SIMPLE) from 127.0.0.1:55112 2024-11-21T22:05:44,746 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000002/launch_container.sh] 2024-11-21T22:05:44,746 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000002/container_tokens] 2024-11-21T22:05:44,746 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-1_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000002/sysfs] 2024-11-21T22:05:46,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T22:05:46,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T22:05:46,709 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:46,710 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T22:05:46,710 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T22:05:46,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:46,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-21T22:05:46,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-21T22:05:46,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_980951471_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:46,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-21T22:05:46,711 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/export-test/export-1732226731132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-21T22:05:46,715 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:46,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T22:05:46,718 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226746718"}]},"ts":"1732226746718"} 2024-11-21T22:05:46,719 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-21T22:05:46,719 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-21T22:05:46,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-21T22:05:46,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, UNASSIGN}] 2024-11-21T22:05:46,721 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, UNASSIGN 2024-11-21T22:05:46,721 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, UNASSIGN 2024-11-21T22:05:46,722 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=f61de7dd7ba8c7a2616adffdda464926, regionState=CLOSING, 
regionLocation=2a884b411602,36419,1732226515183 2024-11-21T22:05:46,722 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=3f2cd4b98a32e900cdb76a3025b3b500, regionState=CLOSING, regionLocation=2a884b411602,41399,1732226514979 2024-11-21T22:05:46,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, UNASSIGN because future has completed 2024-11-21T22:05:46,724 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:05:46,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979}] 2024-11-21T22:05:46,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, UNASSIGN because future has completed 2024-11-21T22:05:46,724 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T22:05:46,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183}] 2024-11-21T22:05:46,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T22:05:46,876 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:46,876 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:05:46,876 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing 3f2cd4b98a32e900cdb76a3025b3b500, disabling compactions & flushes 2024-11-21T22:05:46,876 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:46,876 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:46,876 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
after waiting 0 ms 2024-11-21T22:05:46,876 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 2024-11-21T22:05:46,877 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:46,877 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T22:05:46,877 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing f61de7dd7ba8c7a2616adffdda464926, disabling compactions & flushes 2024-11-21T22:05:46,877 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:46,877 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:46,877 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. after waiting 0 ms 2024-11-21T22:05:46,877 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:46,880 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:05:46,880 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T22:05:46,881 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:05:46,881 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:05:46,881 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500. 
2024-11-21T22:05:46,881 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926. 2024-11-21T22:05:46,881 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for f61de7dd7ba8c7a2616adffdda464926: Waiting for close lock at 1732226746877Running coprocessor pre-close hooks at 1732226746877Disabling compacts and flushes for region at 1732226746877Disabling writes for close at 1732226746877Writing region close event to WAL at 1732226746877Running coprocessor post-close hooks at 1732226746881 (+4 ms)Closed at 1732226746881 2024-11-21T22:05:46,881 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for 3f2cd4b98a32e900cdb76a3025b3b500: Waiting for close lock at 1732226746876Running coprocessor pre-close hooks at 1732226746876Disabling compacts and flushes for region at 1732226746876Disabling writes for close at 1732226746876Writing region close event to WAL at 1732226746877 (+1 ms)Running coprocessor post-close hooks at 1732226746881 (+4 ms)Closed at 1732226746881 2024-11-21T22:05:46,882 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:46,883 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=f61de7dd7ba8c7a2616adffdda464926, regionState=CLOSED 2024-11-21T22:05:46,883 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed 3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:46,883 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=3f2cd4b98a32e900cdb76a3025b3b500, regionState=CLOSED 2024-11-21T22:05:46,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183 because future has completed 2024-11-21T22:05:46,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979 because future has completed 2024-11-21T22:05:46,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-11-21T22:05:46,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure f61de7dd7ba8c7a2616adffdda464926, server=2a884b411602,36419,1732226515183 in 161 msec 2024-11-21T22:05:46,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206 2024-11-21T22:05:46,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f61de7dd7ba8c7a2616adffdda464926, UNASSIGN in 165 msec 2024-11-21T22:05:46,887 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure 3f2cd4b98a32e900cdb76a3025b3b500, server=2a884b411602,41399,1732226514979 in 162 msec 2024-11-21T22:05:46,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=206, resume processing ppid=205 2024-11-21T22:05:46,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3f2cd4b98a32e900cdb76a3025b3b500, UNASSIGN in 166 msec 2024-11-21T22:05:46,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-21T22:05:46,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 170 msec 2024-11-21T22:05:46,891 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732226746891"}]},"ts":"1732226746891"} 2024-11-21T22:05:46,893 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-21T22:05:46,893 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-21T22:05:46,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 178 msec 2024-11-21T22:05:47,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T22:05:47,031 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T22:05:47,031 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,033 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,034 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,035 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36419 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry 
testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,037 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:47,037 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:47,038 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/recovered.edits] 2024-11-21T22:05:47,038 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf, FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/recovered.edits] 2024-11-21T22:05:47,041 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8 to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/cf/3657fe3fba9f429f9596dcf4d22808c8 2024-11-21T22:05:47,041 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/cf/7f64cd1ef1f14c2790ec7e52f494812b 2024-11-21T22:05:47,043 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/recovered.edits/9.seqid to hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926/recovered.edits/9.seqid 2024-11-21T22:05:47,043 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/recovered.edits/9.seqid to 
hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500/recovered.edits/9.seqid 2024-11-21T22:05:47,044 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/f61de7dd7ba8c7a2616adffdda464926 2024-11-21T22:05:47,044 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testtb-testExportFileSystemStateWithSkipTmp/3f2cd4b98a32e900cdb76a3025b3b500 2024-11-21T22:05:47,044 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-21T22:05:47,045 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,048 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-21T22:05:47,050 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-21T22:05:47,051 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,051 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-11-21T22:05:47,051 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226747051"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:47,051 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732226747051"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:47,053 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T22:05:47,053 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3f2cd4b98a32e900cdb76a3025b3b500, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732226729797.3f2cd4b98a32e900cdb76a3025b3b500.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f61de7dd7ba8c7a2616adffdda464926, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732226729797.f61de7dd7ba8c7a2616adffdda464926.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T22:05:47,053 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-11-21T22:05:47,053 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732226747053"}]},"ts":"9223372036854775807"} 2024-11-21T22:05:47,054 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-21T22:05:47,055 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 24 msec 2024-11-21T22:05:47,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,079 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T22:05:47,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T22:05:47,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T22:05:47,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T22:05:47,087 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:47,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:47,087 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T22:05:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-21T22:05:47,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:47,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:47,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:47,088 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,088 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T22:05:47,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T22:05:47,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-21T22:05:47,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,096 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-21T22:05:47,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:47,114 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=813 (was 805) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:43060 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40753 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:46300 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1643116038_1 at /127.0.0.1:43032 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_980951471_22 at /127.0.0.1:38900 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 138441) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7234 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1643116038_1 at /127.0.0.1:46286 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:40753 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-22 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 806) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=559 (was 551) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 17), AvailableMemoryMB=2576 (was 2373) - AvailableMemoryMB LEAK? - 2024-11-21T22:05:47,115 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-21T22:05:47,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-11-21T22:05:47,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43a89134{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T22:05:47,122 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c9f43e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T22:05:47,122 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T22:05:47,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45660404{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T22:05:47,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@266df67f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED} 2024-11-21T22:05:50,801 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732226522247_0010_000001 (auth:SIMPLE) from 127.0.0.1:48762 2024-11-21T22:05:50,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000001/launch_container.sh] 2024-11-21T22:05:50,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000001/container_tokens] 2024-11-21T22:05:50,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1461191534/yarn-5917417586/MiniMRCluster_1461191534-localDir-nm-0_0/usercache/jenkins/appcache/application_1732226522247_0010/container_1732226522247_0010_01_000001/sysfs] 2024-11-21T22:05:52,403 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:05:53,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T22:05:54,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T22:05:57,142 WARN [regionserver/2a884b411602:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-11-21T22:05:59,925 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T22:06:04,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15f0bbad{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T22:06:04,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51bef505{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T22:06:04,135 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T22:06:04,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3242ffcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T22:06:04,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8b95114{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED} 2024-11-21T22:06:21,150 ERROR [Thread[Thread-397,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T22:06:21,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67be7839{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-21T22:06:21,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f77c49c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T22:06:21,152 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T22:06:21,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57d25206{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T22:06:21,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ac57c8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED} 2024-11-21T22:06:21,156 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-21T22:06:21,161 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-21T22:06:21,161 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-21T22:06:21,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741830_1006 (size=968965) 2024-11-21T22:06:21,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741830_1006 (size=968965) 2024-11-21T22:06:21,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741830_1006 (size=968965) 2024-11-21T22:06:21,165 ERROR [Thread[Thread-426,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T22:06:21,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c0e50ac{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-21T22:06:21,169 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fd2a6fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T22:06:21,169 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T22:06:21,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@140feb75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T22:06:21,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1094c217{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED} 2024-11-21T22:06:21,172 ERROR [Thread[Thread-385,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T22:06:21,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-21T22:06:21,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T22:06:21,172 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T22:06:21,172 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:06:21,172 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,172 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,172 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
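The call stack above records the connection being closed from TestExportSnapshot.tearDownAfterClass, which runs through HBaseTestingUtil.shutdownMiniCluster under JUnit's RunAfters. A minimal sketch of the kind of JUnit 4 teardown that produces this path, assuming the HBaseTestingUtil methods visible in the stack frames (shutdownMiniCluster appears in the trace; startMiniCluster and the three-server count are assumptions based on the RS:0/RS:1/RS:2 threads later in this log):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ExportSnapshotTeardownSketch {
  // Shared mini-cluster handle, mirroring the HBaseTestingUtil frames in the trace above.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Assumption: startMiniCluster(3) starts one master plus three region servers,
    // matching the RS:0/RS:1/RS:2 threads that appear later in this log.
    TEST_UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // This is the call recorded in the stack trace (HBaseTestingUtil.shutdownMiniCluster):
    // it closes the shared async connection, shuts down the HBase mini-cluster,
    // and then the backing DFS and ZooKeeper mini-clusters.
    TEST_UTIL.shutdownMiniCluster();
  }
}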
2024-11-21T22:06:21,172 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T22:06:21,172 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=558713208, stopped=false 2024-11-21T22:06:21,173 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,173 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-21T22:06:21,173 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2a884b411602,42183,1732226514042 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T22:06:21,300 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T22:06:21,300 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:06:21,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T22:06:21,300 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T22:06:21,301 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
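The watcher entries above show cluster shutdown being signalled by deleting the /hbase/running znode, with every master and region-server ZKWatcher receiving a NodeDeleted event for that path. A small illustration of the same pattern using the stock org.apache.zookeeper client rather than HBase's internal ZKWatcher; the quorum address and znode path are copied from the log lines above, everything else is an assumption:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // Quorum address and znode path are copied from the watcher entries above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58709", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown(); // the same signal the region servers react to
      }
    });
    // exists(path, true) registers a one-shot watch whether or not the node is present,
    // matching the "Set watcher on znode that does not yet exist" entries that follow.
    zk.exists("/hbase/running", true);
    deleted.await();
    zk.close();
  }
}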
2024-11-21T22:06:21,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:06:21,302 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:06:21,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:06:21,303 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:06:21,303 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T22:06:21,303 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'2a884b411602,41399,1732226514979' ***** 2024-11-21T22:06:21,303 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,303 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T22:06:21,304 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2a884b411602,44629,1732226515126' ***** 2024-11-21T22:06:21,304 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,304 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T22:06:21,304 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2a884b411602,36419,1732226515183' ***** 2024-11-21T22:06:21,304 INFO [RS:0;2a884b411602:41399 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T22:06:21,304 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,304 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T22:06:21,304 INFO [RS:0;2a884b411602:41399 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T22:06:21,305 INFO [RS:2;2a884b411602:36419 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T22:06:21,305 INFO [RS:0;2a884b411602:41399 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T22:06:21,305 INFO [RS:1;2a884b411602:44629 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T22:06:21,305 INFO [RS:2;2a884b411602:36419 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T22:06:21,305 INFO [RS:2;2a884b411602:36419 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T22:06:21,305 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(959): stopping server 2a884b411602,41399,1732226514979 2024-11-21T22:06:21,305 INFO [RS:1;2a884b411602:44629 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T22:06:21,305 INFO [RS:0;2a884b411602:41399 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T22:06:21,305 INFO [RS:1;2a884b411602:44629 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T22:06:21,305 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(3091): Received CLOSE for 807d59511a09f362346016a4b356021c 2024-11-21T22:06:21,305 INFO [RS:0;2a884b411602:41399 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2a884b411602:41399. 
2024-11-21T22:06:21,305 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(3091): Received CLOSE for f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:06:21,305 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T22:06:21,305 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T22:06:21,305 DEBUG [RS:0;2a884b411602:41399 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:06:21,305 DEBUG [RS:0;2a884b411602:41399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,305 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T22:06:21,306 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(3091): Received CLOSE for 7c3e82354b20e9e6eb82600246f4d616 2024-11-21T22:06:21,306 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(959): stopping server 2a884b411602,36419,1732226515183 2024-11-21T22:06:21,306 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(959): stopping server 2a884b411602,44629,1732226515126 2024-11-21T22:06:21,306 INFO [RS:2;2a884b411602:36419 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T22:06:21,306 INFO [RS:1;2a884b411602:44629 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T22:06:21,306 INFO [RS:0;2a884b411602:41399 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T22:06:21,306 INFO [RS:2;2a884b411602:36419 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2a884b411602:36419. 2024-11-21T22:06:21,306 INFO [RS:1;2a884b411602:44629 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2a884b411602:44629. 2024-11-21T22:06:21,306 INFO [RS:0;2a884b411602:41399 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T22:06:21,306 INFO [RS:0;2a884b411602:41399 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T22:06:21,306 DEBUG [RS:2;2a884b411602:36419 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:06:21,306 DEBUG [RS:2;2a884b411602:36419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,306 DEBUG [RS:1;2a884b411602:44629 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:06:21,306 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T22:06:21,306 DEBUG [RS:1;2a884b411602:44629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,306 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 807d59511a09f362346016a4b356021c, disabling compactions & flushes 2024-11-21T22:06:21,306 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:06:21,306 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T22:06:21,306 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T22:06:21,306 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:06:21,307 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1325): Online Regions={f7a1444316755d092bee2d182dde7a0c=testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c.} 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. after waiting 0 ms 2024-11-21T22:06:21,307 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1325): Online Regions={807d59511a09f362346016a4b356021c=hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., 7c3e82354b20e9e6eb82600246f4d616=testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616.} 2024-11-21T22:06:21,307 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:06:21,307 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T22:06:21,307 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 807d59511a09f362346016a4b356021c 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f7a1444316755d092bee2d182dde7a0c, disabling compactions & flushes 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T22:06:21,307 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:06:21,307 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
2024-11-21T22:06:21,307 DEBUG [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1351): Waiting on 7c3e82354b20e9e6eb82600246f4d616, 807d59511a09f362346016a4b356021c 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T22:06:21,307 DEBUG [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. after waiting 0 ms 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T22:06:21,307 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:06:21,307 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-21T22:06:21,308 DEBUG [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1351): Waiting on f7a1444316755d092bee2d182dde7a0c 2024-11-21T22:06:21,315 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/f7a1444316755d092bee2d182dde7a0c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:06:21,316 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,316 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 2024-11-21T22:06:21,316 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f7a1444316755d092bee2d182dde7a0c: Waiting for close lock at 1732226781307Running coprocessor pre-close hooks at 1732226781307Disabling compacts and flushes for region at 1732226781307Disabling writes for close at 1732226781307Writing region close event to WAL at 1732226781308 (+1 ms)Running coprocessor post-close hooks at 1732226781316 (+8 ms)Closed at 1732226781316 2024-11-21T22:06:21,316 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732226649924.f7a1444316755d092bee2d182dde7a0c. 
2024-11-21T22:06:21,325 INFO [regionserver/2a884b411602:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,328 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/.tmp/l/2ccc8536ee8646a6ad7852bd1ac14f3d is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732226648077/DeleteFamily/seqid=0 2024-11-21T22:06:21,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742347_1523 (size=5695) 2024-11-21T22:06:21,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742347_1523 (size=5695) 2024-11-21T22:06:21,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742347_1523 (size=5695) 2024-11-21T22:06:21,333 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/.tmp/l/2ccc8536ee8646a6ad7852bd1ac14f3d 2024-11-21T22:06:21,335 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/info/8ffc0e5970734ae6b95e5397ff98c98c is 173, key is testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616./info:regioninfo/1732226650313/Put/seqid=0 2024-11-21T22:06:21,338 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2ccc8536ee8646a6ad7852bd1ac14f3d 2024-11-21T22:06:21,339 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/.tmp/l/2ccc8536ee8646a6ad7852bd1ac14f3d as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/l/2ccc8536ee8646a6ad7852bd1ac14f3d 2024-11-21T22:06:21,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742348_1524 (size=14362) 2024-11-21T22:06:21,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742348_1524 (size=14362) 2024-11-21T22:06:21,341 INFO [regionserver/2a884b411602:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742348_1524 (size=14362) 2024-11-21T22:06:21,341 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), 
to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/info/8ffc0e5970734ae6b95e5397ff98c98c 2024-11-21T22:06:21,343 INFO [regionserver/2a884b411602:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,344 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2ccc8536ee8646a6ad7852bd1ac14f3d 2024-11-21T22:06:21,344 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/l/2ccc8536ee8646a6ad7852bd1ac14f3d, entries=12, sequenceid=27, filesize=5.6 K 2024-11-21T22:06:21,345 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 807d59511a09f362346016a4b356021c in 38ms, sequenceid=27, compaction requested=false 2024-11-21T22:06:21,348 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/acl/807d59511a09f362346016a4b356021c/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-21T22:06:21,348 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,348 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:06:21,348 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 807d59511a09f362346016a4b356021c: Waiting for close lock at 1732226781306Running coprocessor pre-close hooks at 1732226781306Disabling compacts and flushes for region at 1732226781306Disabling writes for close at 1732226781307 (+1 ms)Obtaining lock to block concurrent updates at 1732226781307Preparing flush snapshotting stores in 807d59511a09f362346016a4b356021c at 1732226781307Finished memstore snapshotting hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1732226781307Flushing stores of hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 
at 1732226781309 (+2 ms)Flushing 807d59511a09f362346016a4b356021c/l: creating writer at 1732226781309Flushing 807d59511a09f362346016a4b356021c/l: appending metadata at 1732226781327 (+18 ms)Flushing 807d59511a09f362346016a4b356021c/l: closing flushed file at 1732226781327Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@edc8bf4: reopening flushed file at 1732226781339 (+12 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 807d59511a09f362346016a4b356021c in 38ms, sequenceid=27, compaction requested=false at 1732226781345 (+6 ms)Writing region close event to WAL at 1732226781345Running coprocessor post-close hooks at 1732226781348 (+3 ms)Closed at 1732226781348 2024-11-21T22:06:21,349 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732226518024.807d59511a09f362346016a4b356021c. 2024-11-21T22:06:21,349 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7c3e82354b20e9e6eb82600246f4d616, disabling compactions & flushes 2024-11-21T22:06:21,349 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:06:21,349 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:06:21,349 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. after waiting 0 ms 2024-11-21T22:06:21,349 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:06:21,352 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/default/testExportExpiredSnapshot/7c3e82354b20e9e6eb82600246f4d616/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T22:06:21,352 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,352 INFO [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 
2024-11-21T22:06:21,352 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7c3e82354b20e9e6eb82600246f4d616: Waiting for close lock at 1732226781349Running coprocessor pre-close hooks at 1732226781349Disabling compacts and flushes for region at 1732226781349Disabling writes for close at 1732226781349Writing region close event to WAL at 1732226781349Running coprocessor post-close hooks at 1732226781352 (+3 ms)Closed at 1732226781352 2024-11-21T22:06:21,352 DEBUG [RS_CLOSE_REGION-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732226649924.7c3e82354b20e9e6eb82600246f4d616. 2024-11-21T22:06:21,361 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/ns/90c4212090614e4fa20d881fc534a323 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1./ns:/1732226648100/DeleteFamily/seqid=0 2024-11-21T22:06:21,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742349_1525 (size=7779) 2024-11-21T22:06:21,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742349_1525 (size=7779) 2024-11-21T22:06:21,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742349_1525 (size=7779) 2024-11-21T22:06:21,366 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/ns/90c4212090614e4fa20d881fc534a323 2024-11-21T22:06:21,383 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/rep_barrier/998e89e9209f4465ab1aceb23a28d115 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1./rep_barrier:/1732226648100/DeleteFamily/seqid=0 2024-11-21T22:06:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742350_1526 (size=8005) 2024-11-21T22:06:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742350_1526 (size=8005) 2024-11-21T22:06:21,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742350_1526 (size=8005) 2024-11-21T22:06:21,388 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/rep_barrier/998e89e9209f4465ab1aceb23a28d115 2024-11-21T22:06:21,406 DEBUG 
[RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/table/dacfad53289b459b80993892e9b05d1f is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732226630417.e8c8a757ef6be4acb9d13d01717f47b1./table:/1732226648100/DeleteFamily/seqid=0 2024-11-21T22:06:21,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742351_1527 (size=8758) 2024-11-21T22:06:21,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742351_1527 (size=8758) 2024-11-21T22:06:21,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742351_1527 (size=8758) 2024-11-21T22:06:21,411 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/table/dacfad53289b459b80993892e9b05d1f 2024-11-21T22:06:21,415 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/info/8ffc0e5970734ae6b95e5397ff98c98c as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/info/8ffc0e5970734ae6b95e5397ff98c98c 2024-11-21T22:06:21,419 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/info/8ffc0e5970734ae6b95e5397ff98c98c, entries=74, sequenceid=199, filesize=14.0 K 2024-11-21T22:06:21,420 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/ns/90c4212090614e4fa20d881fc534a323 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/ns/90c4212090614e4fa20d881fc534a323 2024-11-21T22:06:21,424 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/ns/90c4212090614e4fa20d881fc534a323, entries=23, sequenceid=199, filesize=7.6 K 2024-11-21T22:06:21,425 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/rep_barrier/998e89e9209f4465ab1aceb23a28d115 as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/rep_barrier/998e89e9209f4465ab1aceb23a28d115 2024-11-21T22:06:21,429 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/rep_barrier/998e89e9209f4465ab1aceb23a28d115, entries=21, sequenceid=199, filesize=7.8 K 2024-11-21T22:06:21,430 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/.tmp/table/dacfad53289b459b80993892e9b05d1f as hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/table/dacfad53289b459b80993892e9b05d1f 2024-11-21T22:06:21,434 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/table/dacfad53289b459b80993892e9b05d1f, entries=36, sequenceid=199, filesize=8.6 K 2024-11-21T22:06:21,435 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=199, compaction requested=false 2024-11-21T22:06:21,439 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-21T22:06:21,439 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T22:06:21,439 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T22:06:21,439 INFO [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T22:06:21,439 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732226781307Running coprocessor pre-close hooks at 1732226781307Disabling compacts and flushes for region at 1732226781307Disabling writes for close at 1732226781307Obtaining lock to block concurrent updates at 1732226781307Preparing flush snapshotting stores in 1588230740 at 1732226781307Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1732226781308 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732226781308Flushing 1588230740/info: creating writer at 1732226781309 (+1 ms)Flushing 1588230740/info: appending metadata at 1732226781335 (+26 ms)Flushing 1588230740/info: closing flushed file at 1732226781335Flushing 1588230740/ns: creating writer at 1732226781346 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732226781361 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732226781361Flushing 1588230740/rep_barrier: creating writer at 1732226781370 (+9 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732226781383 (+13 ms)Flushing 
1588230740/rep_barrier: closing flushed file at 1732226781383Flushing 1588230740/table: creating writer at 1732226781392 (+9 ms)Flushing 1588230740/table: appending metadata at 1732226781406 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732226781406Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ae07cbf: reopening flushed file at 1732226781414 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bddf614: reopening flushed file at 1732226781420 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cb627e4: reopening flushed file at 1732226781424 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47db204f: reopening flushed file at 1732226781429 (+5 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=199, compaction requested=false at 1732226781435 (+6 ms)Writing region close event to WAL at 1732226781436 (+1 ms)Running coprocessor post-close hooks at 1732226781439 (+3 ms)Closed at 1732226781439 2024-11-21T22:06:21,440 DEBUG [RS_CLOSE_META-regionserver/2a884b411602:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T22:06:21,508 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(976): stopping server 2a884b411602,41399,1732226514979; all regions closed. 2024-11-21T22:06:21,508 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(976): stopping server 2a884b411602,36419,1732226515183; all regions closed. 2024-11-21T22:06:21,508 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(976): stopping server 2a884b411602,44629,1732226515126; all regions closed. 2024-11-21T22:06:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741835_1011 (size=14176) 2024-11-21T22:06:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741835_1011 (size=14176) 2024-11-21T22:06:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741836_1012 (size=81723) 2024-11-21T22:06:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741836_1012 (size=81723) 2024-11-21T22:06:21,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741835_1011 (size=14176) 2024-11-21T22:06:21,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741836_1012 (size=81723) 2024-11-21T22:06:21,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741833_1009 (size=13301) 2024-11-21T22:06:21,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741833_1009 (size=13301) 2024-11-21T22:06:21,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741833_1009 (size=13301) 2024-11-21T22:06:21,520 DEBUG [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs 2024-11-21T22:06:21,520 DEBUG 
[RS:2;2a884b411602:36419 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs 2024-11-21T22:06:21,520 DEBUG [RS:1;2a884b411602:44629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs 2024-11-21T22:06:21,520 INFO [RS:2;2a884b411602:36419 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2a884b411602%2C36419%2C1732226515183:(num 1732226517279) 2024-11-21T22:06:21,520 INFO [RS:1;2a884b411602:44629 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2a884b411602%2C44629%2C1732226515126:(num 1732226517304) 2024-11-21T22:06:21,520 INFO [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2a884b411602%2C41399%2C1732226514979.meta:.meta(num 1732226517666) 2024-11-21T22:06:21,520 DEBUG [RS:2;2a884b411602:36419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,520 DEBUG [RS:1;2a884b411602:44629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,520 INFO [RS:2;2a884b411602:36419 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,520 INFO [RS:1;2a884b411602:44629 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,520 INFO [RS:1;2a884b411602:44629 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T22:06:21,520 INFO [RS:2;2a884b411602:36419 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T22:06:21,520 INFO [RS:2;2a884b411602:36419 {}] hbase.ChoreService(370): Chore service for: regionserver/2a884b411602:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T22:06:21,520 INFO [RS:1;2a884b411602:44629 {}] hbase.ChoreService(370): Chore service for: regionserver/2a884b411602:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T22:06:21,521 INFO [RS:1;2a884b411602:44629 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T22:06:21,521 INFO [RS:2;2a884b411602:36419 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T22:06:21,521 INFO [RS:2;2a884b411602:36419 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T22:06:21,521 INFO [RS:1;2a884b411602:44629 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T22:06:21,521 INFO [RS:1;2a884b411602:44629 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T22:06:21,521 INFO [RS:2;2a884b411602:36419 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T22:06:21,521 INFO [regionserver/2a884b411602:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T22:06:21,521 INFO [RS:2;2a884b411602:36419 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T22:06:21,521 INFO [RS:1;2a884b411602:44629 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T22:06:21,521 INFO [regionserver/2a884b411602:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T22:06:21,521 INFO [RS:2;2a884b411602:36419 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36419 2024-11-21T22:06:21,521 INFO [RS:1;2a884b411602:44629 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44629 2024-11-21T22:06:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073741834_1010 (size=9476) 2024-11-21T22:06:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073741834_1010 (size=9476) 2024-11-21T22:06:21,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073741834_1010 (size=9476) 2024-11-21T22:06:21,526 DEBUG [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs 2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2a884b411602%2C41399%2C1732226514979:(num 1732226517287) 2024-11-21T22:06:21,526 DEBUG [RS:0;2a884b411602:41399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] hbase.ChoreService(370): Chore service for: regionserver/2a884b411602:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T22:06:21,526 INFO [regionserver/2a884b411602:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
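Before exiting, each region server archives its write-ahead log into the shared oldWALs directory, as the "Moved 1 WAL file(s)" and "Closed WAL: AsyncFSWAL" entries above record. A hedged sketch that lists that directory with Hadoop's FileSystem API; the namenode address and test-data path are copied from this run's log and are specific to it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode address and test-data root are copied from this log and are
    // specific to this Jenkins run; substitute your own values elsewhere.
    conf.set("fs.defaultFS", "hdfs://localhost:40237");
    Path oldWals =
        new Path("/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/oldWALs");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus status : fs.listStatus(oldWals)) {
        // Archived WAL names encode host%2Cport%2Cstartcode plus a timestamp,
        // as in the "Closed WAL: AsyncFSWAL ..." entries above.
        System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
      }
    }
  }
}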
2024-11-21T22:06:21,526 INFO [RS:0;2a884b411602:41399 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41399
2024-11-21T22:06:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-21T22:06:21,549 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2a884b411602,44629,1732226515126
2024-11-21T22:06:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2a884b411602,36419,1732226515183
2024-11-21T22:06:21,549 INFO [RS:1;2a884b411602:44629 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-21T22:06:21,549 INFO [RS:2;2a884b411602:36419 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-21T22:06:21,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2a884b411602,41399,1732226514979
2024-11-21T22:06:21,600 INFO [RS:0;2a884b411602:41399 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-21T22:06:21,611 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2a884b411602,41399,1732226514979]
2024-11-21T22:06:21,626 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2a884b411602,41399,1732226514979 already deleted, retry=false
2024-11-21T22:06:21,627 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2a884b411602,41399,1732226514979 expired; onlineServers=2
2024-11-21T22:06:21,627 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2a884b411602,36419,1732226515183]
2024-11-21T22:06:21,635 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2a884b411602,36419,1732226515183 already deleted, retry=false
2024-11-21T22:06:21,635 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2a884b411602,36419,1732226515183 expired; onlineServers=1
2024-11-21T22:06:21,635 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2a884b411602,44629,1732226515126]
2024-11-21T22:06:21,643 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2a884b411602,44629,1732226515126 already deleted, retry=false
2024-11-21T22:06:21,643 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2a884b411602,44629,1732226515126 expired; onlineServers=0
2024-11-21T22:06:21,644 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2a884b411602,42183,1732226514042' *****
2024-11-21T22:06:21,644 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-21T22:06:21,644 INFO [M:0;2a884b411602:42183 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-21T22:06:21,645 INFO [M:0;2a884b411602:42183 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-21T22:06:21,645 DEBUG [M:0;2a884b411602:42183 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-21T22:06:21,645 DEBUG [M:0;2a884b411602:42183 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-21T22:06:21,645 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-21T22:06:21,645 DEBUG [master/2a884b411602:0:becomeActiveMaster-HFileCleaner.large.0-1732226516809 {}] cleaner.HFileCleaner(306): Exit Thread[master/2a884b411602:0:becomeActiveMaster-HFileCleaner.large.0-1732226516809,5,FailOnTimeoutGroup]
2024-11-21T22:06:21,645 DEBUG [master/2a884b411602:0:becomeActiveMaster-HFileCleaner.small.0-1732226516812 {}] cleaner.HFileCleaner(306): Exit Thread[master/2a884b411602:0:becomeActiveMaster-HFileCleaner.small.0-1732226516812,5,FailOnTimeoutGroup]
2024-11-21T22:06:21,646 INFO [M:0;2a884b411602:42183 {}] hbase.ChoreService(370): Chore service for: master/2a884b411602:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-21T22:06:21,646 INFO [M:0;2a884b411602:42183 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-21T22:06:21,646 DEBUG [M:0;2a884b411602:42183 {}] master.HMaster(1795): Stopping service threads
2024-11-21T22:06:21,646 INFO [M:0;2a884b411602:42183 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-21T22:06:21,647 INFO [M:0;2a884b411602:42183 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-21T22:06:21,648 INFO [M:0;2a884b411602:42183 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-21T22:06:21,648 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-21T22:06:21,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-21T22:06:21,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T22:06:21,651 DEBUG [M:0;2a884b411602:42183 {}] zookeeper.ZKUtil(347): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-21T22:06:21,651 WARN [M:0;2a884b411602:42183 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-21T22:06:21,653 INFO [M:0;2a884b411602:42183 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/.lastflushedseqids
2024-11-21T22:06:21,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35185 is added to blk_1073742352_1528 (size=329)
2024-11-21T22:06:21,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36581 is added to blk_1073742352_1528 (size=329)
2024-11-21T22:06:21,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36723 is added to blk_1073742352_1528 (size=329)
2024-11-21T22:06:21,666 INFO [M:0;2a884b411602:42183 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-21T22:06:21,666 INFO [M:0;2a884b411602:42183 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-21T22:06:21,666 DEBUG [M:0;2a884b411602:42183 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-21T22:06:21,677 INFO [M:0;2a884b411602:42183 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T22:06:21,677 DEBUG [M:0;2a884b411602:42183 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T22:06:21,677 DEBUG [M:0;2a884b411602:42183 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-21T22:06:21,677 DEBUG [M:0;2a884b411602:42183 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T22:06:21,678 INFO [M:0;2a884b411602:42183 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=823.78 KB heapSize=988.58 KB
2024-11-21T22:06:21,678 ERROR [AsyncFSWAL-0-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T22:06:21,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,711 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36419-0x10160b48b8a0003, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,711 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44629-0x10160b48b8a0002, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,712 INFO [RS:2;2a884b411602:36419 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T22:06:21,712 INFO [RS:1;2a884b411602:44629 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T22:06:21,712 INFO [RS:1;2a884b411602:44629 {}] regionserver.HRegionServer(1031): Exiting; stopping=2a884b411602,44629,1732226515126; zookeeper connection closed.
2024-11-21T22:06:21,712 INFO [RS:2;2a884b411602:36419 {}] regionserver.HRegionServer(1031): Exiting; stopping=2a884b411602,36419,1732226515183; zookeeper connection closed.
2024-11-21T22:06:21,713 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@480b2c20 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@480b2c20
2024-11-21T22:06:21,713 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66cb305d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66cb305d
2024-11-21T22:06:21,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,718 INFO [RS:0;2a884b411602:41399 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T22:06:21,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41399-0x10160b48b8a0001, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:06:21,719 INFO [RS:0;2a884b411602:41399 {}] regionserver.HRegionServer(1031): Exiting; stopping=2a884b411602,41399,1732226514979; zookeeper connection closed.
2024-11-21T22:06:21,719 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7bf426df {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7bf426df
2024-11-21T22:06:21,720 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-21T22:06:23,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T22:06:24,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-21T22:06:24,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-21T22:06:24,424 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-21T22:06:24,425 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-11-21T22:06:24,426 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-11-21T22:06:24,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-21T22:06:24,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-11-21T22:06:24,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-21T22:06:26,841 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-21T22:06:53,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T22:06:55,321 DEBUG [master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-21T22:06:55,323 DEBUG [master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-21T22:07:03,223 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2a884b411602:42183 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1efd49b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d98f3dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3356 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.CountDownLatch$Sync@285deafc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11203 Waited count: 11790 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@5cb0cb09 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26ccdd59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 664 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@2892a159-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2790 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45619f56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 
Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 32662 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@763dd33e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 85 Waited count: 2043 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 97 Waited count: 2038 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 99 Waited count: 2048 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 76 Waited count: 2045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 83 Waited count: 2063 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(403249841)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@6d882a4b-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38107): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40791b6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1203 Waited count: 1296 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 367 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1858979425-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1858979425-120-acceptor-0@49b48971-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1655466367) connection to localhost/127.0.0.1:40237 from jenkins): State: TIMED_WAITING Blocked count: 1156 Waited count: 1156 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:40237): State: TIMED_WAITING 
Blocked count: 0 Waited count: 1782 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36021): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 274 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@101d59d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1230 Waited count: 1295 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default 
port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp586058363-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp586058363-154-acceptor-0@6ec10c9e-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp586058363-155): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp586058363-156): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39123): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 249 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645fdeda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1182 Waited 
count: 1284 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@781a7c5b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@48d3ed30[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@68e9abfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58709): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 306 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b69e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:58709):): State: WAITING Blocked count: 0 Waited count: 410 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5398127d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 442 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58431cd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:40237): State: TIMED_WAITING Blocked count: 8 Waited count: 341 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5679399a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:58709)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 15 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@584a36a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3575b3fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a5ff923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 61 Waited count: 235 Waiting on java.util.concurrent.Semaphore$NonfairSync@616359e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 157 Waited count: 620 Waiting on java.util.concurrent.Semaphore$NonfairSync@41dc295d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183): State: WAITING Blocked count: 70 Waited count: 6156 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592e98db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@773a4040 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@578c093b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@7ac9bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@19666c20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c7b170c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 78 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;2a884b411602:42183): State: TIMED_WAITING Blocked count: 12 Waited count: 2675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007f22d8f926a8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@52e8112e): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 3258 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 57 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32491 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@649d724f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7044622e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d560c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7866a18d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 498 (LeaseRenewer:jenkins.hfs.2@localhost:40237): State: TIMED_WAITING Blocked count: 8 Waited count: 338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (LeaseRenewer:jenkins.hfs.0@localhost:40237): State: TIMED_WAITING Blocked count: 8 Waited count: 338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (LeaseRenewer:jenkins.hfs.1@localhost:40237): State: TIMED_WAITING Blocked count: 8 Waited count: 338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 7 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 576 Waiting on java.util.concurrent.ForkJoinPool@2460f204 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 654 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 655 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 387 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@647ce272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1247 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 486 Waiting on java.util.concurrent.ForkJoinPool@2460f204 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@2606ddb2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2037 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2781 (region-location-3): State: WAITING 
Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2782 (region-location-4): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3954 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8408 (AsyncFSWAL-1-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5eba4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8412 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-21T22:07:23,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T22:07:53,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2a884b411602:42183
223 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 5
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 18 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@1efd49b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d98f3dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3955 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.CountDownLatch$Sync@75b13f64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11203 Waited count: 11791 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) 
app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@5cb0cb09 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26ccdd59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 784 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@2892a159-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 
(FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2790 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45619f56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38586 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@763dd33e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 85 Waited count: 2103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 97 Waited count: 2098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 99 Waited count: 2108 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 76 Waited count: 2105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 83 Waited count: 2123 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(403249841)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@6d882a4b-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 781 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38107): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40791b6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1223 Waited count: 1336 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 431 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1858979425-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1858979425-120-acceptor-0@49b48971-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1655466367) connection to localhost/127.0.0.1:40237 from jenkins): State: TIMED_WAITING Blocked count: 1216 Waited count: 1216 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 0 Waited count: 1842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 781 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC 
Server idle connection scanner for port 36021): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 294 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@101d59d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1250 Waited count: 1335 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 409 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 409 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp586058363-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp586058363-154-acceptor-0@6ec10c9e-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp586058363-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp586058363-156): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39123): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 269 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645fdeda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1202 Waited count: 1324 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39123): State: TIMED_WAITING Blocked count: 0 
Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@781a7c5b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@48d3ed30[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@68e9abfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58709): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b69e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:58709):): State: WAITING Blocked count: 0 Waited count: 415 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5398127d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 447 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58431cd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5679399a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 312 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:58709)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 15 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@584a36a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 
(NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3575b3fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a5ff923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 61 Waited count: 235 Waiting on java.util.concurrent.Semaphore$NonfairSync@616359e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 157 Waited count: 620 Waiting on java.util.concurrent.Semaphore$NonfairSync@41dc295d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183): State: WAITING Blocked count: 70 Waited count: 6156 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592e98db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@773a4040 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@578c093b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac9bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@19666c20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c7b170c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 78 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;2a884b411602:42183): State: TIMED_WAITING Blocked count: 12 Waited count: 2675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007f22d8f926a8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@52e8112e): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3857 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 57 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7227ef18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38494 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 12 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@649d724f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7044622e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d560c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7866a18d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 7 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 654 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 655 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@647ce272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1247 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 486 Waiting on java.util.concurrent.ForkJoinPool@2460f204 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@2606ddb2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2037 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2781 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2782 (region-location-4): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8408 (AsyncFSWAL-1-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5eba4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8412 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-21T22:08:23,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T22:08:53,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2a884b411602:42183 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 18 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@1efd49b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12
(Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d98f3dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited 
count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@65ca872d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11203 Waited count: 11792 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) 
app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@5cb0cb09 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26ccdd59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 905 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 
(qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@2892a159-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2790 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45619f56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44510 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@763dd33e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 40237): 
State: TIMED_WAITING Blocked count: 85 Waited count: 2163 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 97 Waited count: 2158 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 99 Waited count: 2168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 76 Waited count: 2165 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 83 Waited count: 2183 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(403249841)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@6d882a4b-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 901 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38107): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40791b6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1243 Waited count: 1376 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 
(org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1858979425-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1858979425-120-acceptor-0@49b48971-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1655466367) connection to localhost/127.0.0.1:40237 from 
jenkins): State: TIMED_WAITING Blocked count: 1276 Waited count: 1276 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 0 Waited count: 1902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 901 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36021): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@101d59d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1270 Waited count: 1375 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp586058363-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp586058363-154-acceptor-0@6ec10c9e-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp586058363-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp586058363-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39123): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645fdeda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1222 Waited count: 1364 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1)): State: TIMED_WAITING Blocked count: 13 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@781a7c5b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@48d3ed30[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(java.util.concurrent.ThreadPoolExecutor$Worker@68e9abfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58709): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 315 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b69e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:58709):): State: WAITING Blocked count: 0 Waited count: 419 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5398127d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 451 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58431cd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5679399a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:58709)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 15 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@584a36a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3575b3fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a5ff923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 61 Waited count: 235 Waiting on java.util.concurrent.Semaphore$NonfairSync@616359e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 157 Waited count: 620 Waiting on java.util.concurrent.Semaphore$NonfairSync@41dc295d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183): State: WAITING Blocked count: 70 Waited count: 6156 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592e98db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@773a4040 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@578c093b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac9bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@19666c20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c7b170c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 78 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;2a884b411602:42183): State: TIMED_WAITING Blocked count: 12 Waited count: 2675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007f22d8f926a8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@52e8112e): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4457 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 57 Waited count: 5 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7227ef18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44497 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@649d724f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7044622e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d560c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7866a18d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 7 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44350 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 654 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 655 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 
399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@647ce272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1247 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on 
java.util.TaskQueue@2606ddb2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2037 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2781 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2782 (region-location-4): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8408 (AsyncFSWAL-1-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5eba4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8412 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-21T22:09:23,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T22:09:53,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
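The two DEBUG lines above come from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer reflectively probing a Hadoop-internal threadGroup field; on Hadoop releases newer than 3.2.3/3.3.4 the field no longer exists (see HBASE-27595), so Class#getDeclaredField throws NoSuchFieldException and the fixer only logs it and continues. The "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks in this log are printed while the test waits for master M:0;2a884b411602:42183 to shut down; the Thread 22 stack below shows the dump being driven through the JMX thread MXBean (sun.management.ThreadImpl.getThreadInfo via org.apache.hadoop.hbase.util.Threads.printThreadInfo). As a rough sketch only, and not HBase's actual implementation (class and method names here are hypothetical), a comparable periodic dump can be produced with java.lang.management.ThreadMXBean:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

/**
 * Hypothetical sketch of a periodic "automatic stack trace" dumper. It only
 * illustrates the JMX API visible in the Thread 22 stack; the real helper in
 * HBase is org.apache.hadoop.hbase.util.Threads.printThreadInfo.
 */
public final class PeriodicThreadDumper {

  static void dumpAllThreads(String title) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    System.out.println("Process Thread Dump: " + title);
    // true/true also collects locked-monitor and locked-synchronizer details.
    ThreadInfo[] infos = mx.dumpAllThreads(true, true);
    System.out.println(infos.length + " active threads");
    for (ThreadInfo info : infos) {
      System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d%n",
          info.getThreadId(), info.getThreadName(), info.getThreadState(),
          info.getBlockedCount(), info.getWaitedCount());
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Dump twice, one minute apart, mirroring the 60-second cadence in this log.
    for (int i = 0; i < 2; i++) {
      dumpAllThreads("Automatic Stack Trace every 60 seconds");
      Thread.sleep(60_000L);
    }
  }
}

ThreadInfo.getLockName() additionally reports the object a WAITING thread is parked on, which is the kind of information behind the "Waiting on java.util.concurrent.locks..." entries in the dump that follows.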
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2a884b411602:42183 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 18 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@1efd49b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d98f3dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@4f68c8fb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11203 Waited count: 11793 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@5cb0cb09 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26ccdd59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1025 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@2892a159-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2790 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45619f56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited 
count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50434 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@763dd33e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 85 Waited count: 2223 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 97 Waited count: 2218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 99 Waited count: 2228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 76 Waited count: 2225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 83 Waited count: 2243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(403249841)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@6d882a4b-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 1021 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38107): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 356 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40791b6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1263 Waited count: 1416 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1858979425-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1858979425-120-acceptor-0@49b48971-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1655466367) connection to localhost/127.0.0.1:40237 from jenkins): State: TIMED_WAITING Blocked count: 1336 Waited count: 1336 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 0 Waited count: 1962 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 1021 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36021): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 334 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@101d59d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1290 Waited count: 1415 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp586058363-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp586058363-154-acceptor-0@6ec10c9e-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp586058363-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp586058363-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39123): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 309 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645fdeda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1242 Waited count: 1404 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@781a7c5b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@48d3ed30[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@68e9abfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58709): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b69e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:58709):): State: WAITING Blocked count: 0 Waited count: 423 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5398127d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 455 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58431cd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5679399a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:58709)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 15 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@584a36a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3575b3fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 6 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a5ff923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 61 Waited count: 235 Waiting on java.util.concurrent.Semaphore$NonfairSync@616359e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 157 Waited count: 620 Waiting on java.util.concurrent.Semaphore$NonfairSync@41dc295d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183): State: WAITING Blocked count: 70 Waited count: 6156 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592e98db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@773a4040 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@578c093b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac9bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@19666c20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c7b170c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 78 
Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;2a884b411602:42183): State: TIMED_WAITING Blocked count: 12 Waited count: 2675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007f22d8f926a8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@52e8112e): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5056 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 57 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7227ef18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50499 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@649d724f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7044622e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d560c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7866a18d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 7 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50353 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 654 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 655 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@647ce272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@2606ddb2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2037 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2781 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2782 (region-location-4): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8408 (AsyncFSWAL-1-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5eba4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8412 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-21T22:10:23,027 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:10:53,027 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:11:21,680 DEBUG [M:0;2a884b411602:42183 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732226781666Disabling compacts and flushes for region at 1732226781666Disabling writes for close at 1732226781677 (+11 ms)Obtaining lock to block concurrent updates at 1732226781678 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732226781678Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=843550, getHeapSize=1012240, getOffHeapSize=0, getCellsCount=2221 at 1732226781678Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732227081680 (+300002 ms) 2024-11-21T22:11:21,680 WARN [M:0;2a884b411602:42183 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-21T22:11:21,685 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T22:11:21,690 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-21T22:11:21,690 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-21T22:11:21,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 2024-11-21T22:11:21,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T22:11:21,696 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T22:11:21,696 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 2024-11-21T22:11:21,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;2a884b411602:42183 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 18 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@1efd49b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d98f3dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@50cfaded Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11203 Waited count: 11794 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@5cb0cb09 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@26ccdd59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@2892a159-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:35817}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2790 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45619f56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56358 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@763dd33e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 86 Waited count: 2285 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 97 Waited count: 2278 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 99 Waited count: 2288 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 76 Waited count: 2285 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 83 Waited count: 2304 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(403249841)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@6d882a4b-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:44267}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 1141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38107): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40791b6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1283 Waited count: 1457 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 602 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38107): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1858979425-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1858979425-120-acceptor-0@49b48971-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:38821}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1655466367) connection to localhost/127.0.0.1:40237 from jenkins): State: TIMED_WAITING Blocked count: 1386 Waited count: 1386 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 0 Waited count: 2021 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 1141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36021): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 354 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@101d59d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1310 Waited count: 1455 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 616 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 36021): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp586058363-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f22d842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp586058363-154-acceptor-0@6ec10c9e-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:46809}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp586058363-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp586058363-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 1140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 39123): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 329 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645fdeda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237): State: TIMED_WAITING Blocked count: 1262 Waited count: 1444 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 39123): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1)): State: TIMED_WAITING Blocked count: 13 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@781a7c5b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@48d3ed30[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@68e9abfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58709): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b69e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:58709):): State: WAITING Blocked count: 0 Waited count: 428 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5398127d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 2 Waited count: 460 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58431cd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5679399a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:58709)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 15 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@584a36a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3575b3fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 6 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678af975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a5ff923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 61 Waited count: 235 Waiting on java.util.concurrent.Semaphore$NonfairSync@616359e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 157 Waited count: 620 Waiting on java.util.concurrent.Semaphore$NonfairSync@41dc295d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42183): State: WAITING Blocked count: 70 Waited count: 6156 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592e98db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c6df43c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@773a4040 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@578c093b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac9bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42183): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@19666c20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c7b170c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 78 
Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;2a884b411602:42183): State: TIMED_WAITING Blocked count: 12 Waited count: 2676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1350/0x00007f22d91fb8d8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/2a884b411602:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@52e8112e): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5655 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 57 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7227ef18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56501 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@649d724f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 481 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7044622e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d560c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/2a884b411602:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7866a18d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 7 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56355 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 654 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 655 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 988 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@647ce272 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@2606ddb2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2037 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2781 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2782 (region-location-4): State: WAITING Blocked count: 1 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77e6abc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8408 (AsyncFSWAL-1-hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData-prefix:2a884b411602,42183,1732226514042): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5eba4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8412 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8413 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8416 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8417 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1338/0x00007f22d91f3fa0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-21T22:11:23,028 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T22:11:25,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 after 4000ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T22:11:25,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752 after 4000ms
java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T22:11:26,686 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
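The ERROR above names the knob it wants raised: hbase.wal.async.wait.on.shutdown.seconds (the "5 seconds" in the message suggests the default in this branch is 5). A minimal sketch, assuming the value is set on the test Configuration before the minicluster starts; the 30-second value is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWait {
      public static Configuration withLongerWalShutdownWait() {
        Configuration conf = HBaseConfiguration.create();
        // Give the async WAL writer more time to close before the shutdown path
        // gives up and logs the "We have waited ... seconds" error seen above.
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30); // illustrative value
        return conf;
      }
    }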
2024-11-21T22:11:26,686 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-21T22:11:26,687 INFO [M:0;2a884b411602:42183 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-21T22:11:26,687 INFO [M:0;2a884b411602:42183 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42183
2024-11-21T22:11:26,688 INFO [M:0;2a884b411602:42183 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-21T22:11:26,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:40237/user/jenkins/test-data/dd5b1ce3-8ff9-ffa5-e8a5-1e6398dfab90/MasterData/WALs/2a884b411602,42183,1732226514042/2a884b411602%2C42183%2C1732226514042.1732226515752
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 12 more
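Both lease-recovery failures above share one root cause: RecoverLeaseFSUtils keeps retrying recoverLease (and, via reflection, isFileClosed) against a DFSClient that the test teardown has already closed, so every call throws "Filesystem closed". A simplified sketch of that retry pattern against the public HDFS API, not the HBase utility itself; the attempt limit is illustrative, and the 4000 ms pause mirrors the "after 4000ms" pacing in the log:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Repeatedly ask the NameNode to recover the lease on a WAL file until it
      // reports the file closed, or we run out of attempts.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        for (int attempt = 0; attempt < 10; attempt++) {        // illustrative limit
          // Both calls throw IOException("Filesystem closed") once the client is shut down.
          if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
            return true;
          }
          Thread.sleep(4000L);
        }
        return false;
      }
    }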
2024-11-21T22:11:26,836 INFO [M:0;2a884b411602:42183 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T22:11:26,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:11:26,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42183-0x10160b48b8a0000, quorum=127.0.0.1:58709, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T22:11:26,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d3bb473{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T22:11:26,844 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-21T22:11:26,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-21T22:11:26,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@544a54bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-21T22:11:26,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e41eadb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED}
2024-11-21T22:11:26,847 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-21T22:11:26,847 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-21T22:11:26,847 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1864559489-172.17.0.2-1732226508877 (Datanode Uuid e1949d46-2557-4501-97db-f758381d874f) service to localhost/127.0.0.1:40237
2024-11-21T22:11:26,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-21T22:11:26,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data5/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data6/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,850 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-21T22:11:26,852 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19299b82{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T22:11:26,852 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-21T22:11:26,853 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-21T22:11:26,853 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@246a58e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-21T22:11:26,853 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ed86ab3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED}
2024-11-21T22:11:26,854 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-21T22:11:26,854 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-21T22:11:26,854 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1864559489-172.17.0.2-1732226508877 (Datanode Uuid fd312820-d742-438d-9626-f8ad2cae6b0d) service to localhost/127.0.0.1:40237
2024-11-21T22:11:26,854 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-21T22:11:26,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data3/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data4/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,855 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-21T22:11:26,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1adf9305{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T22:11:26,857 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-21T22:11:26,857 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-21T22:11:26,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@480cc207{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-21T22:11:26,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134642c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED}
2024-11-21T22:11:26,859 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-21T22:11:26,859 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-21T22:11:26,859 WARN [BP-1864559489-172.17.0.2-1732226508877 heartbeating to localhost/127.0.0.1:40237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1864559489-172.17.0.2-1732226508877 (Datanode Uuid 2ea3eedb-5a78-449a-89b6-9d20737bb165) service to localhost/127.0.0.1:40237
2024-11-21T22:11:26,859 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-21T22:11:26,860 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data1/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,860 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/cluster_55e2bba0-fbc3-c74f-5f7f-5690bd26f222/data/data2/current/BP-1864559489-172.17.0.2-1732226508877 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-21T22:11:26,860 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-21T22:11:26,867 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a55f3e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-21T22:11:26,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-21T22:11:26,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-21T22:11:26,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-21T22:11:26,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/70e14ce4-4977-93b2-4bca-fb684e970894/hadoop.log.dir/,STOPPED}
2024-11-21T22:11:26,886 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-21T22:11:26,981 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
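For context on what produced this teardown sequence: the run uses an in-process HDFS, ZooKeeper and HBase minicluster managed by HBaseTestingUtil, and the final "Minicluster is down" line is the tail end of its shutdown. A minimal sketch of that lifecycle, assuming the hbase-testing-util artifact on the classpath and a plain main-method harness rather than this job's actual test class:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      public static void main(String[] args) throws Exception {
        UTIL.startMiniCluster();        // boots mini ZK, mini DFS and an HBase master/regionserver
        try {
          // ... run test logic against UTIL.getConnection() ...
        } finally {
          UTIL.shutdownMiniCluster();   // ends with the "Minicluster is down" log line seen above
        }
      }
    }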